Dataset schema (column: type, with observed length range, value range, or cardinality):

repo_name: string (length 5–114)
repo_url: string (length 24–133)
snapshot_id: string (length 40, fixed)
revision_id: string (length 40, fixed)
directory_id: string (length 40, fixed)
branch_name: string (209 distinct values)
visit_date: timestamp[ns]
revision_date: timestamp[ns]
committer_date: timestamp[ns]
github_id: int64 (9.83k–683M)
star_events_count: int64 (0–22.6k)
fork_events_count: int64 (0–4.15k)
gha_license_id: string (17 distinct values)
gha_created_at: timestamp[ns]
gha_updated_at: timestamp[ns]
gha_pushed_at: timestamp[ns]
gha_language: string (115 distinct values)
files: list (length 1–13.2k)
num_files: int64 (1–13.2k)
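For readers who want to work with rows of this shape programmatically, here is a minimal sketch of streaming the dataset with the Hugging Face `datasets` library. The dataset path "org/the-stack-style-dump" is a hypothetical placeholder (the actual dataset identifier is not given in this dump); the column names come from the schema above:

```python
# Minimal sketch: stream rows matching the schema above and inspect them.
# "org/the-stack-style-dump" is a HYPOTHETICAL placeholder path; substitute
# the real dataset identifier before running.
from datasets import load_dataset

ds = load_dataset("org/the-stack-style-dump", split="train", streaming=True)

for row in ds:
    # Repository-level scalar columns.
    print(row["repo_name"], row["branch_name"], row["star_events_count"])
    # `files` is a list of per-file records; `num_files` is its length.
    assert row["num_files"] == len(row["files"])
    break
```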
repo_name: louisgogo/Django
repo_url: https://github.com/louisgogo/Django
snapshot_id: 0859ec3f89355f072fb74c522351af1e91bc0470
revision_id: 044e65be6adede2550f6ff61adaa31a2ac2d949b
directory_id: 4a0815af121ebf3284bbca925b5102f1007b9ddd
branch_name: refs/heads/master
visit_date: 2021-01-12T04:53:58.058900
revision_date: 2017-01-03T15:21:26
committer_date: 2017-01-03T15:21:26
github_id: 77,807,451
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.6555555462837219, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 10.375, "blob_id": "6f4e40c9820801cfd902ea75af8ecdd4704047c6", "content_id": "7d002855109a3079dd691c49e93b06c60257068c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 96, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/Test.py", "repo_name": "louisgogo/Django", "src_encoding": "UTF-8", "text": "#encoding: utf-8\n'''\nCreated on 2017年1月2日\n\n@author: larkjoe\n'''\nimport django\ndjango-admin" } ]
num_files: 1
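Each element of `files` carries per-blob metadata (path, language, length_bytes, alpha_fraction, is_generated, is_vendor, license_type, src_encoding) plus the full text, as in the record above. A minimal sketch of filtering such a list, assuming it has already been parsed into Python objects; the single sample entry is abridged from the record above purely for illustration:

```python
# Minimal sketch: keep only hand-written, non-vendored Python files from a
# row's `files` list. The entry below is abridged from the record above.
files = [
    {
        "path": "/Test.py",
        "language": "Python",
        "length_bytes": 96,
        "is_generated": False,
        "is_vendor": False,
        "license_type": "no_license",
    },
]

python_sources = [
    f
    for f in files
    if f["language"] == "Python" and not f["is_generated"] and not f["is_vendor"]
]

for f in python_sources:
    print(f"{f['path']}: {f['length_bytes']} bytes ({f['license_type']})")
```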
repo_name: AaronChiu2017/ohmydata_spider
repo_url: https://github.com/AaronChiu2017/ohmydata_spider
snapshot_id: a62098638a7aa00335fac4e897004d1c9852b38f
revision_id: 897ae443e404c35b6ca386550aaa0107d3c10563
directory_id: fb2cbd363dc45e8c3f079177cef4f8ea3c5862db
branch_name: refs/heads/master
visit_date: 2020-11-26T19:41:48.047763
revision_date: 2016-04-21T08:07:45
committer_date: 2016-04-21T08:07:45
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.4736842215061188, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 18, "blob_id": "62664ecaf9202b93c2c9341cad63d6825a96f78b", "content_id": "c5399bf1602259867170cf57e3cb1a540746f834", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/ohmydata_spider/commands/__init__.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "__author__ = 'mee'\n" }, { "alpha_fraction": 0.6320530772209167, "alphanum_fraction": 0.633001446723938, "avg_line_length": 23.811763763427734, "blob_id": "c895cbc508a10b42ea8f184d5ef484368ccb0fff", "content_id": "905f449c10b6a37550897da828ae8c2d0ea5402e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2109, "license_type": "no_license", "max_line_length": 67, "num_lines": 85, "path": "/ohmydata_spider/pipelines.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/topics/item-pipeline.html\n\nfrom scrapy.exceptions import DropItem\nfrom scrapy.conf import settings\nimport logging\nimport pymongo\nimport functools\n\n\ndef check_spider_pipeline(process_item_method):\n\n @functools.wraps(process_item_method)\n def wrapper(self, item, spider):\n\n # message for debugging\n msg = '%%s %s pipeline step'%(self.__class__.__name__,)\n logger = logging.getLogger(spider.name)\n if self.__class__ in spider.pipeline:\n logger.info(msg % 'executing')\n return process_item_method(self, item, spider)\n else:\n return item\n\n return wrapper\n\n\nclass MongoDBPipeline(object):\n\n def __init__(self):\n connection = pymongo.MongoClient(\n settings['SingleMONGODB_SERVER'],\n settings['SingleMONGODB_PORT']\n )\n db = connection[settings['MONGODB_DB']]\n self.collection = db[settings['MONGODB_COLLECTION']]\n\n @check_spider_pipeline\n def process_item(self, item, spider):\n valid = True\n for data in item:\n if not data:\n valid = False\n raise DropItem(\"Missing {0}!\".format(data))\n if valid:\n self.collection.insert(dict(item))\n self.logger.info(\"proxy ip added to MonogoDB database\")\n\n return item\n\n\nclass DataTreasurePipeline(object):\n\n @check_spider_pipeline\n def process_item(self, item, spider):\n print 'book name is :' + item['book_name']\n print 'book description is ' + item['book_description']\n\n return item\n\n\nclass JdBookPipeline(object):\n\n @check_spider_pipeline\n def process_item(self, item, spider):\n return item\n\n\nclass TmallCommentPipeline(object):\n\n @check_spider_pipeline\n def process_item(self, item, spider):\n return item\n\n\nclass WeiboPipeline(object):\n\n @check_spider_pipeline\n def process_item(self, item, spider):\n return item\n" }, { "alpha_fraction": 0.6413301825523376, "alphanum_fraction": 0.6532066464424133, "avg_line_length": 17.30434799194336, "blob_id": "e74eb00d81cee51c8b8c18c903806109ddbab71f", "content_id": "bd863f4cfc86478df652a133ee4c75a42d58ab02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 421, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/ohmydata_spider/commands/clear_redis_stats.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n__author__ = 'mee'\n\nimport redis\n\nREDIS_HOST = 'localhost'\nREDIS_PORT = 6379\nFILTER_KEY = 'myspider:dupefilter'\nREQUEST_KEY = 'myspider:requests'\nSTATS_KEY = 'scrapy:stats'\n\n\ndef clear_stats():\n server = redis.Redis(REDIS_HOST,REDIS_PORT)\n server.delete(FILTER_KEY)\n server.delete(REQUEST_KEY)\n server.delete(STATS_KEY)\n\n\nif __name__ == '__main__':\n clear_stats()\n" }, { "alpha_fraction": 0.6577208638191223, "alphanum_fraction": 0.6948802471160889, "avg_line_length": 44.566036224365234, "blob_id": "23db0b14de6e646353e72685c011e7a85bcea3c6", "content_id": "54b997fcb2c0f00b426851be611d91a625801365", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2472, "license_type": "no_license", "max_line_length": 565, "num_lines": 53, "path": "/ohmydata_spider/spiders/myspider.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom scrapy_redis.spiders import RedisSpider\nfrom scrapy.selector import Selector\nfrom ohmydata_spider.util.select_result import list_first_item,clean_url\nfrom scrapy.http import Request\nfrom ohmydata_spider.items import TutorialItem\nimport ohmydata_spider.pipelines\n\n\nclass MySpider(RedisSpider):\n name = 'myspider'\n start_urls = ('http://www.kjson.com/proxy/index/1',)\n\n pipeline = set([\n ohmydata_spider.pipelines.MongoDBPipeline,\n ])\n\n proxy_porturl = {\n \"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAB1UlEQVQ4jdWTzcspYRjGH9OxGdLIRhTNQhYWSKmxGxtJxrBQNvwDSlnxH8hCNkgWsrSwEU1Z2dhbKJqGRvlI81FY6NH0dBbzHm9HZ+Ws3mv13Pdz/+q+rroNEELwqbCPyZ8L/3q9eJ5vtVqr1Qoh5Pf7y+Wyy+UCACCE2u32ZDJBCDEMUywWv2kIIYRQVVWaphuNhqqql8ulWq2mUin9q9frsSy73+83m000Gh0Oh/CPvtY+Ho+32y2Xy5lMJoIg4vH4+XxGCAEARqNRPp+32+0kSTIMw3Hcu2eSJB0OR7fb1TTter0OBoNYLIZhmCRJkiT5fD59zOv1CoLwDhuNxk6nI4piKpViWdbj8VQqFQCALMsAAKvVqo8RBPF4PDRNe097sVhst9t0Oh2JRDiOWy6XeloAAAz7GjMYDH/FrVuXZZmiqOl0qpf1ej2ZTEIIBUEIhUK73U7vz2YzmqbfAzscDs/nMxgM6iVFUafTCSHkdDpxHF+v13p/s9m8/H+v7Xa7zWZzv9+/3++KogyHw0AggGEYhmGJRGIwGCiKIorieDzOZrMv2PC6qtVq1Ww2eZ7HcTwcDpdKJZvNpvuq1Wrz+dxisRQKhUwm8w/4A/3Qq/ov+Dc2O/z/LmddcAAAAABJRU5ErkJggg==\":'80',\n }\n\n def parse(self, response):\n response_sel = Selector(response)\n\n next_link = list_first_item(response_sel.xpath(u'//div[@class=\"page\"]/a[text()=\"下一页\"]/@href').extract())\n\n if next_link:\n next_link = clean_url(response.url, next_link, response.encoding)\n yield Request(url=next_link, callback=self.parse)\n\n print next_link\n # 必须使用for循环来调用parse_detail函数,否则只能解析第一个界面\n for item in self.parse_detail(response):\n yield item\n\n def parse_detail(self, response):\n response_sel = Selector(response)\n\n table_bodys = response_sel.xpath('//*[@id=\"dataTables-example\"]/tbody/tr')\n\n for table_body in table_bodys:\n proxy_item = TutorialItem()\n port_url = str(list_first_item(table_body.xpath('./td[2]/img/@src').extract())).split('&')[0]\n\n if port_url in self.proxy_porturl:\n proxy_item['proxy_url'] = list_first_item(table_body.xpath('./td[1]/text()').extract()) + ':' + self.proxy_porturl[port_url]\n proxy_item['proxy_type'] = list_first_item(table_body.xpath('./td[3]/text()').extract())\n proxy_item['proxy_locate'] = list_first_item(table_body.xpath('./td[7]/text()').extract())\n else:\n continue\n yield proxy_item\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5887305736541748, "alphanum_fraction": 0.5939119458198547, "avg_line_length": 31.16666603088379, "blob_id": 
"354a9e2507d25e9cef98cd2ac37248577faf2535", "content_id": "35202f41bda78b0abcb59031f7dd9a658e35ec26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 119, "num_lines": 48, "path": "/start_cluster.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport multiprocessing\nimport argparse\nimport pexpect\nimport sys\n\nclass sshProcess(multiprocessing.Process):\n def __init__(self, workerid, hostname, password, crawlername):\n multiprocessing.Process.__init__(self)\n self.workerid = workerid\n self.hostname = hostname\n self.password = password\n self.crawlername = crawlername\n\n def run(self):\n server = pexpect.spawn('ssh %s cd ~/cpython/ohmydata_spider;scrapy crawl %s'%(self.hostname, self.crawlername))\n fout = file(self.workerid+'.log', 'w')\n server.logfile = fout\n server.expect('.*ssword:')\n server.sendline(self.password)\n server.expect(pexpect.EOF)\n\ndef main():\n parse = argparse.ArgumentParser()\n parse.add_argument('--worker', help='input the number of you want run in the worker', type=int,default=1)\n parse.add_argument('--crawlername', help='input the cralwer name that you want running', type=str,default=\"\")\n args = parse.parse_args()\n worker = args.worker\n crawlername = args.crawlername\n\n \n config = open('cluster.config', 'r')\n for line in config:\n info = line.split(' ')\n if len(info) == 3:\n workerid = info[0]\n hostname = info[1]\n password = info[2]\n \n i = 0 \n while i < worker:\n p = sshProcess(workerid, hostname, password, crawlername)\n p.start()\n i = i + 1\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.8302469253540039, "alphanum_fraction": 0.8302469253540039, "avg_line_length": 9.387096405029297, "blob_id": "e92763c29bd7b74e614b5828fc30ea684c713d1d", "content_id": "336c83ba4133bc9a5a7472aa3e206a499326473a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 560, "license_type": "no_license", "max_line_length": 73, "num_lines": 31, "path": "/ReadMe.txt", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "\n项目描述:\n分布式爬虫爬取免费代理服务器ip地址,获取数据后将其存储在mongodb中,通过web界面展示获取到的数据,并能够在web界面上对相应字段进行查询\n\n\n分布式实现:\nredis\n\n存储:\nmongodb\n\n爬虫状态显示:\ngraphite\n\nTODO\nweb界面展示部分(待做):\nflask\n\npython库依赖问题\n解决思路\nvirtualenv virtualenvwrapper管理python 库依赖问题\n\nTODO\n部署方式\ndocker\n\nmongodb存储字段\ndatabase:proxyip_data\ncollection:proxyip_collection\n\n存储内容包括\nip 端口 类型 所在地区\n\n" }, { "alpha_fraction": 0.5950919985771179, "alphanum_fraction": 0.6104294657707214, "avg_line_length": 26.08333396911621, "blob_id": "330eb3a0b6fe03ddbf11f495bf36c4f506de0a74", "content_id": "1c854d5ea5654c7900933ba8d352cdd1243594ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/readunicode.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport codecs\n\nf = open('TmallComment.json', 'r')\nf2 = open('allTmallComment.json', 'w')\nfor text in f:\n try:\n f2.write(text.decode('unicode-escape').encode('utf-8'))\n print text.decode('unicode-escape').encode('utf-8')\n except Exception, e:\n print 'error'\n\n" }, { "alpha_fraction": 0.7528656721115112, "alphanum_fraction": 0.7767079472541809, 
"avg_line_length": 25.289155960083008, "blob_id": "435036579cf4c6932c603ccfad46a67b82e8d396", "content_id": "2b432f316a617c8fb56930af8b8b1c73f1fb582a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2423, "license_type": "no_license", "max_line_length": 125, "num_lines": 83, "path": "/ohmydata_spider/settings.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n# Scrapy settings for ohmydata_spider project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/topics/settings.html\n#\n\nSPIDER_MODULES = ['ohmydata_spider.spiders']\nNEWSPIDER_MODULE = 'ohmydata_spider.spiders'\nITEM_PIPELINES = {\n 'ohmydata_spider.pipelines.DataTreasurePipeline': 100,\n 'ohmydata_spider.pipelines.MongoDBPipeline': 200,\n 'ohmydata_spider.pipelines.JdBookPipeline': 300,\n 'ohmydata_spider.pipelines.TmallCommentPipeline': 400,\n}\n\n# 设置等待时间缓解服务器压力,并能够隐藏自己\nDOWNLOAD_DELAY = 2\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\n# 关闭默认的s3下载处理器\nDOWNLOAD_HANDLERS = {'s3':None,}\n\n# 并发请求最大值\nCONCURRENT_REQUESTS = 64\n\n# 单个域名并发请求最大值\nCONCURRENT_REQUESTS_PER_DOMAIN = 32\n\n# 下载中间件设置,下载中间件用于修改全局scrapy request和response.\nDOWNLOADER_MIDDLEWARES = {\n 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware':110,\n 'ohmydata_spider.contrib.downloadermiddleware.selector_proxy.SelectorProxyMiddlerware':100,\n 'scrapy.extensions.downloadermiddlewares.useragent.UserAgentMiddleware':None,\n 'ohmydata_spider.contrib.downloadermiddleware.rotate_useragent.RotateUserAgentMiddleware':400,#将中间件中的user_agent修改为自己实现的部分\n # 'ohmydata_spider.contrib.downloadermiddleware.Cookie.CookiesMiddleware':401,\n}\nUSER_AGENT = ''\n\n# 爬虫状态信息\nSTATS_CLASS = 'ohmydata_spider.scrapy_graphite.graphite.RedisGraphiteStatsCollector'\n\n# graphite 设置\nGRAPHITE_HOST = 'localhost'\nGRAPHITE_PORT = 2003\nGRAPHITE_IGNOREKEYS = []\n\n# 禁用cookie\nCOOKIES_ENABLED = True\n# COOKIES_DEBUG=False\n\n# redis调度器相关设置部分\nSCHEDULER = 'scrapy_redis.scheduler.Scheduler'\n\nSCHEDULER_PERSIST = True\n\nSCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'\n\nSCHEDULER_IDLE_BEFORE_CLOSE = 10\n\n# 数据存储部分设置\nSingleMONGODB_SERVER = \"localhost\"\nSingleMONGODB_PORT = 27017\nMONGODB_DB = \"proxyip_data\"\nMONGODB_COLLECTION = \"proxyip_collection\"\n\nShardMONGODB_SERVER = \"localhost\"\nShardMONGODB_PORT = 27017\nShardMONGODB_DB = \"proxyip_mongo\"\nGridFs_Collection = \"proxyip_table\"\n\nREDIS_HOST = 'localhost'\nREDIS_PORT = 6379\n\n\n# 数据序列化到文件\nFEED_URI = u'ProductInfo.csv'\nFEED_FORMAT='CSV'" }, { "alpha_fraction": 0.47354498505592346, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 15.434782981872559, "blob_id": "bb28018de8ea333fa0915afef32e571dc4159484", "content_id": "42450b7166d5c00073175cd3d614da4f6dc24203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 378, "license_type": "no_license", "max_line_length": 24, "num_lines": 23, "path": "/ohmydata_spider_ENV/requirements.txt", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": 
"cffi==1.3.1\ncharacteristic==14.3.0\ncryptography==1.1.1\ncssselect==0.9.1\nenum34==1.0.4\nidna==2.0\nipaddress==1.0.15\nlxml==3.5.0\npyasn1==0.1.9\npyasn1-modules==0.0.8\npycparser==2.14\npymongo==3.1.1\npyOpenSSL==0.15.1\nqueuelib==1.4.2\nredis==2.10.5\nScrapy==1.0.3\nscrapy-redis==0.6.0\nservice-identity==14.0.0\nsix==1.10.0\nTwisted==15.4.0\nw3lib==1.13.0\nwheel==0.24.0\nzope.interface==4.1.3\n" }, { "alpha_fraction": 0.6397515535354614, "alphanum_fraction": 0.6431394815444946, "avg_line_length": 35.79166793823242, "blob_id": "42f49744ed2fab569354701a1d4ca4c029169656", "content_id": "978c0946e801550a556fd9cf2aa79655d62168c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1777, "license_type": "no_license", "max_line_length": 138, "num_lines": 48, "path": "/ohmydata_spider/spiders/dataspider.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n__author__ = 'mee'\nfrom scrapy_redis.spiders import RedisSpider\nfrom scrapy.selector import Selector\nfrom ohmydata_spider.util.select_result import list_first_item,clean_url\nfrom scrapy.http import Request\nfrom ohmydata_spider.items import DataTreasureItem\nimport ohmydata_spider.pipelines\n\n\nclass DataSpider(RedisSpider):\n\n name = \"ohmygourd\"\n start_urls = (\n 'http://www.woaidu.org/sitemap_1.html',\n )\n\n pipeline = set([\n ohmydata_spider.pipelines.DataTreasurePipeline,\n ])\n\n def parse(self, response):\n response_sel = Selector(response)\n\n next_link = list_first_item(response_sel.xpath(u'//div[@class=\"k2\"]/div/a[text()=\"下一页\"]/@href').extract())\n\n if next_link:\n next_link = clean_url(response.url, next_link, response.encoding)\n yield Request(url=next_link, callback=self.parse)\n\n for detail_link in response_sel.xpath(u'//div[contains(@class,\"sousuolist\")]/a/@href').extract():\n if detail_link:\n detail_link = clean_url(response.url, detail_link, response.encoding)\n print detail_link\n yield Request(url=detail_link, callback=self.parse_detail)\n\n def parse_detail(self, response):\n data_item = DataTreasureItem()\n\n response_selector = Selector(response)\n\n print '********************book name is ' + list_first_item(response_selector.xpath('//div[@class=\"zizida\"][1]/text()').extract())\n data_item['book_name'] = list_first_item(response_selector.xpath('//div[@class=\"zizida\"][1]/text()').extract())\n data_item['book_description'] = list_first_item(response_selector.xpath('//div[@class=\"lili\"][1]/text()').extract())\n\n yield data_item\n\n\n\n\n\n" }, { "alpha_fraction": 0.5574144721031189, "alphanum_fraction": 0.5673003792762756, "avg_line_length": 34.5405387878418, "blob_id": "286536329e8bff854188e228b691a62f225de62e", "content_id": "d435d3c8e53fed53e0234c654cd5b37ca0daf7a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2710, "license_type": "no_license", "max_line_length": 101, "num_lines": 74, "path": "/ohmydata_spider/contrib/downloadermiddleware/selector_proxy.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n__author__ = 'mee'\n\nimport base64\nimport random\nfrom scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware\nimport pymongo\nimport logging\n\n\nclass SelectorProxyMiddlerware(HttpProxyMiddleware): # 中间件继承时一定要有__init__方法,否则不会被执行\n\n # 实例化类时进行数据库连接\n def __init__(self):\n SingleMONGODB_SERVER = \"localhost\"\n 
SingleMONGODB_PORT = 27017\n MONGODB_DB = \"proxyip_data\"\n MONGODB_COLLECTION = \"proxyip_collection\"\n\n try:\n connection = pymongo.MongoClient(\n SingleMONGODB_SERVER,\n SingleMONGODB_PORT\n )\n\n db = connection[MONGODB_DB]\n self.collection = db[MONGODB_COLLECTION]\n except Exception, e:\n logging.warning(\"connection mongodb error %s\", e.message)\n\n def process_request(self, request, spider):\n\n proxy = self.getproxy_ip(spider.proxy)\n\n if proxy is not None:\n logger = logging.getLogger(spider.name)\n logger.info(\"Select the proxy : %s\" % (proxy['proxy_url']))\n if proxy['user_pass'] is not None:\n request.meta['proxy'] = proxy['proxy_url']\n encoded_user_pass = base64.encodestring(proxy['user_pass']).strip()\n request.headers['Proxy-Authorization'] = 'Basic' + encoded_user_pass\n else:\n request.meta['proxy'] = proxy['proxy_url']\n\n # 随机选取一个代理\n def getproxy_ip(self, proxy_type):\n try:\n if proxy_type == 'http':\n proj = self.collection.find({\"proxy_type\": \"HTTP\"}, {\"proxy_url\": 1})\n proj.skip(random.randint(0, proj.count()))\n proxy_info = proj.limit(-1).next()\n proxy_dict = {'proxy_url': \"http://%s\"%(proxy_info['proxy_url']), \"user_pass\": None}\n\n elif proxy_type == 'https':\n proj = self.collection.find({\"proxy_type\": \"HTTPS\"}, {\"proxy_url\": 1})\n proj.skip(random.randint(0, proj.count()))\n proxy_info = proj.limit(-1).next()\n proxy_dict = {'proxy_url': \"https://%s\"%(proxy_info['proxy_url']), \"user_pass\": None}\n\n elif proxy_type == 'GFW':\n proxy_dict = {'proxy_url': \"http://127.0.0.1:8118\", \"user_pass\": None}\n\n return proxy_dict\n except Exception, e:\n logging.warning(\"Get proxy Exception from mongodb warn info: %s\", e.message)\n return None\n\n\nif __name__ == '__main__':\n test_proxy = SelectorProxyMiddlerware()\n # test_proxy.getproxy_ip()\n # test_proxy.process_request(request=\"\",spider=\"\")\n" }, { "alpha_fraction": 0.6747279167175293, "alphanum_fraction": 0.683192253112793, "avg_line_length": 20.205127716064453, "blob_id": "17415d14f8d0070c9275f60ce42dfc5e53f07fd1", "content_id": "054fe00891d840535a4d862d16a6c941f92d52d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 895, "license_type": "no_license", "max_line_length": 127, "num_lines": 39, "path": "/ohmydata_spider/util/select_result.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport urllib\nimport urlparse\nfrom urlparse import urljoin\nfrom w3lib.html import replace_entities\n\n\ndef clean_link(link_text):\n\n return link_text.strip(\"\\t\\r\\n '\\\"\")\n\n# 返回第一个url地址\nlist_first_item = lambda x:x[0] if x else None \n\n# 将url地址组装返回,并移除空格标点 entites\nclean_url = lambda base_url, u, response_encoding: urljoin(base_url, replace_entities(clean_link(u.decode(response_encoding))))\n\n\n# 获取请求参数\ndef get_query(url, key):\n bits = list(urlparse.urlparse(url))\n query = urlparse.parse_qs(bits[4])\n\n return query[key][0]\n\n\n# 设置请求参数\ndef set_query(url, **args):\n bits = list(urlparse.urlparse(url))\n query = urlparse.parse_qs(bits[4])\n\n for key in args:\n query[key] = args[key]\n\n bits[4] = urllib.urlencode(query, True)\n\n return urlparse.urlunparse(bits)\n" }, { "alpha_fraction": 0.5559091567993164, "alphanum_fraction": 0.566179633140564, "avg_line_length": 39.42689895629883, "blob_id": "1a778218e08c91f3f967db06ea39dd3610e013d4", "content_id": "b1b1083475b1216735aa050b6812cbfc3bc63d67", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 6943, "license_type": "no_license", "max_line_length": 142, "num_lines": 171, "path": "/ohmydata_spider/spiders/TmallCommentspider.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nfrom scrapy_redis.spiders import RedisSpider\nfrom ohmydata_spider.items import TmallCommentItem\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom ohmydata_spider.util.select_result import get_query, set_query, clean_link\nimport ohmydata_spider.pipelines\nimport re, json\nimport logging\n\n\n__author__ = 'mee'\n\n\nclass TmallCommentSpider(RedisSpider):\n\n name = \"TmallComment\"\n\n start_urls = (\n \"https://nike.world.tmall.com/\",\n \"https://jackjones.world.tmall.com/\",\n \"https://suning.world.tmall.com/\",\n \"https://xiaomi.world.tmall.com/\",\n \"https://only.world.tmall.com/\",\n \"https://uniqlo.world.tmall.com/\",\n \"https://apple.tmall.com/\",\n \"https://adidas.tmall.com/\",\n \"https://newbalance.tmall.com/\",\n \"https://lee.tmall.com/\",\n )\n\n categoryUrl = \"https://suning.world.tmall.com/category-1115569769.htm?search=y&catId=1115569769&pageNo=1\"\n asyncUrl = \"https://suning.world.tmall.com/i/asynSearch.htm?mid=null&wid=null&path=?&&search=y&catId=?&scid=?&pageNo=?\"\n rateUrl = \"https://rate.tmall.com/list_detail_rate.htm?itemId=522155891308&sellerId=2616970884&currentPage=1\"\n\n pipeline = set([\n ohmydata_spider.pipelines.TmallCommentPipeline,\n ])\n\n proxy = 'GFW'\n\n def parse(self, response):\n response_sel = Selector(response)\n\n category = response_sel.xpath(u'//a[contains(@href,\"category\")]/@href').extract()\n sellerid = response_sel.xpath(u'//meta[contains(@content,\"userId\")]/@content').extract()\n\n # get the sellerid and replace it\n sellerId = re.findall(r'userId=(\\d+)', sellerid[0])[0]\n if sellerId:\n self.rateUrl = set_query(self.rateUrl, sellerId=sellerId)\n else:\n self.logger.error(\"Get the sellerid error !\")\n\n domain = re.findall(r'https:(.*)', response.url)[0]\n if domain:\n # replace the request page domain\n self.categoryUrl, result_count = re.subn(r'//(.*?)/', domain, self.categoryUrl)\n self.asyncUrl, result_count = re.subn(r'//(.*?)/', domain, self.asyncUrl)\n else:\n self.logger.error(\"Get the request domain error!\")\n\n all_category = set()\n for category_url in category:\n category_id = re.findall(r'category-(\\d+).htm', category_url)\n if category_id:\n all_category.add(category_id[0])\n\n for category_id in all_category:\n # set the category id\n result_url, result_count = re.subn(r'(\\d+\\d+)', category_id, self.categoryUrl)\n self.logger.info(\"category url : %s\", result_url)\n yield Request(url=result_url, callback=self.parse_category)\n\n def parse_category(self, response):\n response_sel = Selector(response)\n data_widgetid = response_sel.xpath(u'//*[@class=\"J_TModule\" and @data-title=\"搜索列表\"]/@data-widgetid').extract()\n wid = data_widgetid[0]\n\n mid = 'w-' + wid + '-0'\n catId = get_query(response.url, 'catId')\n path = \"/category\"+catId + '.htm'\n pageNo = get_query(response.url, 'pageNo')\n\n page_url = set_query(self.asyncUrl, wid=wid, mid=mid, path=path, catId=catId, scid=catId,pageNo=pageNo)\n\n yield Request(url=page_url, callback=self.parse_nextpage)\n\n def parse_nextpage(self, response):\n response_sel = Selector(response)\n next_pageurl = response_sel.xpath(u'//a[contains(@class,\"next\")]/@href').extract()\n\n if len(next_pageurl) > 0:\n 
page_num = get_query(next_pageurl[0], 'pageNo')\n next_url = set_query(self.categoryUrl, pageNo=page_num)\n yield Request(url=next_url, callback=self.parse_category)\n else:\n self.logger.warning(\"Can not find the next page url ! \")\n\n dl_bodys = response_sel.xpath(u'/html/body/div/div[3]')\n\n for dl_body in dl_bodys:\n item_lines = dl_body.xpath(u'./div/dl')\n for item_line in item_lines:\n comment_item = TmallCommentItem()\n\n data_id = item_line.xpath(u'./@data-id').extract()\n\n item_id = re.findall('(\\d+)', data_id[0])\n\n item_name = item_line.xpath(u'./dd[contains(@class,\"detail\")]/a/text()').extract()\n item_type = item_line.xpath(u'./dd[contains(@class,\"detail\")]/a/span/text()').extract()\n item_price = item_line.xpath(u'./dd[contains(@class,\"detail\")]/div/div[contains(@class,\"cprice-area\")]/span/text()').extract()\n item_sales = item_line.xpath(u'./dd[contains(@class,\"detail\")]/div/div[contains(@class,\"sale-area\")]/span/text()').extract()\n\n if len(item_name) > 1:\n comment_item['ItemName'] = item_name[0].strip() + ' ' + item_name[1].strip()\n else:\n comment_item['ItemName'] = item_name[0].strip()\n\n if len(item_type) > 0:\n comment_item['ItemType'] = item_type[0].strip()\n if len(item_price) > 1:\n comment_item['ItemPrice'] = item_price[1].strip()\n if len(item_sales) > 0:\n comment_item['ItemSales'] = item_sales[0].strip()\n\n yield comment_item\n\n # if len(item_id) > 0:\n # comment_url = set_query(self.rateUrl, itemId=item_id[0])\n # yield Request(url=comment_url,\n # meta={'item': comment_item},\n # callback=self.parse_comment)\n # else:\n # self.logger.error('Get the item id error !')\n\n # def parse_comment(self, response):\n # response_sel = Selector(response)\n # comment_item = response.meta['item']\n #\n # allPageCount = re.findall('\"lastPage\\\":(.+?)\\,', response_sel.extract())[0]\n #\n # # 对每一页的评论进行解析\n # i = 1\n # while i < int(allPageCount):\n # next_link = set_query(response.url, currentPage=i)\n #\n # i = i + 1\n # yield Request(url=next_link,\n # meta={'item': comment_item},\n # callback=self.parse_detail)\n #\n # def parse_detail(self, response):\n #\n # self.logger.info(\"parse url : %s\", response.url)\n # response_sel = Selector(response)\n # commentJson = re.findall('\\\"rateList\\\":(\\[.*?\\])\\,\\\"searchinfo\\\"', response_sel.extract())[0]\n #\n # for data in json.loads(commentJson):\n # comment_item = response.meta['item']\n #\n # comment_item['itemId'] = get_query(response.url, 'itemId')\n # comment_item['userNick'] = data['displayUserNick']\n # comment_item['rateDate'] = data['rateDate']\n # comment_item['rateContent'] = data['rateContent']\n #\n # yield comment_item\n" }, { "alpha_fraction": 0.6600984930992126, "alphanum_fraction": 0.6625615954399109, "avg_line_length": 21.55555534362793, "blob_id": "a463ca05c2b1190961444597f6ccfe5df34383a7", "content_id": "e1e44ae011f16d037538cc874f6b01d30b120a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 406, "license_type": "no_license", "max_line_length": 54, "num_lines": 18, "path": "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport random\nimport logging\nfrom ohmydata_spider.util.sinaCookie import cookies\n\n\n__author__ = 'mee'\n\n\nclass CookiesMiddleware(object):\n\n def process_request(self, request, spider):\n cookie = random.choice(cookies)\n # logger = 
logging.getLogger(spider.name)\n # logger.info(\"Get the cookie: %s\" % (cookie))\n request.cookies = cookie\n" }, { "alpha_fraction": 0.6649659872055054, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 21.576923370361328, "blob_id": "aef058435a987f40714fb94f36a4f00bb174edce", "content_id": "9600a370cd40f6b916f4969ceed6ff7ddc6104de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "no_license", "max_line_length": 77, "num_lines": 26, "path": "/ohmydata_spider/spiders/spidertest.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n__author__ = 'mee'\nfrom scrapy_redis.spiders import RedisSpider\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\nfrom ohmydata_spider.items import TmallCommentItem\nimport re\nimport ohmydata_spider.pipelines\n\n\nclass TestSpider(RedisSpider):\n name = \"SpiderTest\"\n\n start_urls = (\n \"http://weibo.cn/pub/\",\n )\n proxy = ''\n\n def parse(self, response):\n response_sel = Selector(response)\n\n hot_weibo = response_sel.xpath(u'//a[contains(@href, \"http\")]/@href')\n\n print hot_weibo.extract()\n\n" }, { "alpha_fraction": 0.4850336015224457, "alphanum_fraction": 0.5027489066123962, "avg_line_length": 24.184616088867188, "blob_id": "42649511d46e97cf0e6f04aca03f1ffbd779e794", "content_id": "5fc4290bfd2cb4e3fdc79826a43c547f32d723ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1637, "license_type": "no_license", "max_line_length": 85, "num_lines": 65, "path": "/ohmydata_spider/util/sinaCookie.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport json\nimport base64\nimport requests\nimport logging\n\n__author__ = 'mee'\n\n'''\n Crawl Weibo Account\n'''\nWeiBoAccount = [\n {'user': '[email protected]', 'psw': 'a123456'},\n]\n\n\ndef getCookies(weibo):\n \"\"\"\n function: get cookies\n :param weibo: weibo Account Info\n :return: cookies\n \"\"\"\n cookies = []\n loginURL = r\"https://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.15)\"\n for elem in weibo:\n account = elem['user']\n password = elem['psw']\n username = base64.b64encode(account.encode('utf-8')).decode('utf-8')\n\n postData = {\n \"entry\": \"sso\",\n \"gateway\": \"1\",\n \"from\": \"null\",\n \"savestate\": \"30\",\n \"useticket\": \"0\",\n \"pagerefer\": \"\",\n \"vsnf\": \"1\",\n \"su\": username,\n \"service\": \"sso\",\n \"sp\": password,\n \"sr\": \"1440*900\",\n \"encoding\": \"UTF-8\",\n \"cdult\": \"3\",\n \"domain\": \"sina.com.cn\",\n \"prelt\": \"0\",\n \"returntype\": \"TEXT\",\n }\n session = requests.Session()\n r = session.post(loginURL, data=postData)\n jsonStr = r.content.decode('gbk')\n info = json.loads(jsonStr)\n\n if info[\"retcode\"] == \"0\":\n print \"Cookie Account: %s\"%(account)\n logging.info(\"Cookie Account: %s\"%(account))\n cookie = session.cookies.get_dict()\n cookies.append(cookie)\n else:\n logging.warn(\"Cookie get Failed : %s\"%(info['reason']))\n\n return cookies\n\ncookies = getCookies(WeiBoAccount)\n" }, { "alpha_fraction": 0.5998731851577759, "alphanum_fraction": 0.6005073189735413, "avg_line_length": 19.205127716064453, "blob_id": "9031290c929ff4d36945c1f43b35f3a465c8c5a5", "content_id": "ad0cb698e7e26ddca332c75180a4faccb4f0dd51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1577, "license_type": "no_license", "max_line_length": 54, "num_lines": 78, "path": "/ohmydata_spider/items.py", "repo_name": "AaronChiu2017/ohmydata_spider", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/topics/items.html\n\n\nfrom scrapy.item import Item, Field\n\n\nclass TutorialItem(Item):\n # define the fields for your item here like:\n # name = Field()\n proxy_url = Field()\n proxy_type = Field()\n proxy_locate = Field()\n\n\nclass DataTreasureItem(Item):\n book_name = Field()\n book_description = Field()\n\n\nclass JdBookItem(Item):\n number = Field()\n bookName = Field()\n author = Field()\n press = Field()\n bookId = Field()\n price = Field()\n preferentialPrice = Field()\n\n\nclass TmallCommentItem(Item):\n ItemName = Field()\n ItemType = Field()\n ItemSales = Field()\n ItemPrice = Field()\n itemId = Field()\n userNick = Field()\n rateDate = Field()\n rateContent = Field()\n\n\nclass WeiboInfoItem(Item):\n \"\"\"\n weibo Account info\n \"\"\"\n id = Field()\n NickName = Field()\n Gender = Field()\n Province = Field()\n City = Field()\n Signature = Field()\n Birthday = Field()\n Num_Tweets = Field()\n Num_Follows = Field()\n Num_Fans = Field()\n Sex_Orientation = Field()\n Marriage = Field()\n URL = Field()\n\n\nclass WeiboContentItem(Item):\n \"\"\"\n weibo content info\n \"\"\"\n id = Field()\n ID = Field()\n Content = Field()\n PubTime = Field()\n Co_oridinates = Field() # location\n Tools = Field() # publish tools eg.computer phone\n Like = Field() # count of the like\n Comment = Field() # count of the comment\n Transfer = Field()\n\n" } ]
num_files: 17
repo_name: 97kristr/databaser_tuto
repo_url: https://github.com/97kristr/databaser_tuto
snapshot_id: b8b6cc8b351bcb378ef81b1eabe4fbc69ee539a7
revision_id: af4e345357175ea59ed07fb8a2f8ebc8b55e5245
directory_id: f74077c2b2b68e131a1c58b0ca72d3829c810191
branch_name: refs/heads/master
visit_date: 2021-01-09T20:26:37.473301
revision_date: 2016-06-01T10:25:39
committer_date: 2016-06-01T10:25:39
github_id: 60,167,622
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.6477876305580139, "alphanum_fraction": 0.6584070920944214, "avg_line_length": 32.17647171020508, "blob_id": "fa88687c9ab0bb3d0a7d53eb73f745967bcb2130", "content_id": "5a66f6bf76048923238c4fd093e0c768dc6e23b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 73, "num_lines": 17, "path": "/inserting_data_with_relationships.py", "repo_name": "97kristr/databaser_tuto", "src_encoding": "UTF-8", "text": "\n\nfrom database_functions import query\n\ndef insert_product_data(records):\n sql = \"insert into Product (Name,Price,ProductTypeID) values (?,?,?)\"\n for record in records:\n query(sql, record)\n\ndef insert_product_type_data(records):\n sql = \"insert into ProductType(Description) values (?)\"\n for record in records:\n query(sql, record)\n\nif __name__ == \"__main__\":\n products = [(\"Signature\",4.0,4),(\"Caramel Delight\",3.5,4)]\n insert_product_data(products)\n product_types = [(\"Hot Chocolate\",)]\n insert_product_type_data(product_types)" } ]
num_files: 1
repo_name: daveshah1/linux-on-litex-vexriscv
repo_url: https://github.com/daveshah1/linux-on-litex-vexriscv
snapshot_id: 63cb4a44f6200976ed69962095bccaf40cb7aec6
revision_id: 6b6f12660e23c217c7f9790f2f023addffc52213
directory_id: 81d835ea4b2d82d8363354372a36977f4113c572
branch_name: refs/heads/master
visit_date: 2020-05-19T07:45:37.804644
revision_date: 2019-05-04T09:30:47
committer_date: 2019-05-04T09:30:47
github_id: 184,904,323
star_events_count: 3
fork_events_count: 1
gha_license_id: null
gha_created_at: 2019-05-04T14:20:31
gha_updated_at: 2019-05-04T09:31:39
gha_pushed_at: 2019-05-04T09:31:37
gha_language: null
[ { "alpha_fraction": 0.5278202891349792, "alphanum_fraction": 0.5755487680435181, "avg_line_length": 35.96226501464844, "blob_id": "1e27d0ef970b42bb57e9b9f4c6dd6b1614ed0005", "content_id": "85ed98e3ea464fb4da191891905cc00047fee0d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3918, "license_type": "no_license", "max_line_length": 102, "num_lines": 106, "path": "/arty.py", "repo_name": "daveshah1/linux-on-litex-vexriscv", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport argparse\n\nfrom migen import *\n\nfrom litex.boards.targets import arty\n\nfrom litex.soc.interconnect import wishbone\nfrom litex.soc.integration.soc_core import mem_decoder\nfrom litex.soc.integration.builder import Builder\n\nfrom litex.soc.cores.spi_flash import SpiFlash\n\n# LinuxSoC -----------------------------------------------------------------------------------------\n\nclass LinuxSoC(arty.EthernetSoC):\n csr_map = {\n \"ddrphy\": 16,\n \"cpu\": 17,\n \"ethphy\": 18,\n \"ethmac\": 19\n }\n csr_map.update(arty.EthernetSoC.csr_map)\n\n arty.EthernetSoC.mem_map = {\n \"rom\": 0x00000000,\n \"sram\": 0x10000000,\n \"emulator_ram\": 0x20000000,\n \"ethmac\": 0x30000000,\n \"spiflash\": 0x50000000,\n \"main_ram\": 0xc0000000,\n \"csr\": 0xf0000000,\n }\n\n def __init__(self):\n arty.EthernetSoC.__init__(self, cpu_type=\"vexriscv\", cpu_variant=\"linux\")\n self.cpu.use_external_variant(\"VexRiscv.v\")\n self.add_constant(\"NETBOOT_LINUX_VEXRISCV\", None)\n\n # machine mode emulator ram\n self.submodules.emulator_ram = wishbone.SRAM(0x4000)\n self.register_mem(\"emulator_ram\", self.mem_map[\"emulator_ram\"], self.emulator_ram.bus, 0x4000)\n\n # spiflash\n spiflash_pads = self.platform.request(\"spiflash4x\")\n spiflash_pads.clk = Signal()\n self.specials += Instance(\"STARTUPE2\",\n i_CLK=0,\n i_GSR=0,\n i_GTS=0,\n i_KEYCLEARB=0,\n i_PACK=0,\n i_USRCCLKO=spiflash_pads.clk,\n i_USRCCLKTS=0,\n i_USRDONEO=1,\n i_USRDONETS=1)\n\n self.submodules.spiflash = SpiFlash(\n spiflash_pads,\n dummy=11,\n div=2,\n endianness=self.cpu.endianness)\n self.add_wb_slave(mem_decoder(self.mem_map[\"spiflash\"]), self.spiflash.bus)\n self.add_memory_region(\"spiflash\", self.mem_map[\"spiflash\"] | self.shadow_base, 0x1000000)\n\n self.add_constant(\"FLASHBOOT_LINUX_VEXRISCV\", None)\n self.add_constant(\"FLASH_BOOT_ADDRESS\", None)\n\n# Build / Load / Flash -----------------------------------------------------------------------------\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Linux on LiteX-VexRiscv\")\n parser.add_argument(\"--build\", action=\"store_true\", help=\"build bitstream\")\n parser.add_argument(\"--load\", action=\"store_true\", help=\"load bitstream (SRAM)\")\n parser.add_argument(\"--flash\", action=\"store_true\", help=\"flash bitstream (SPI Flash)\")\n args = parser.parse_args()\n\n if args.build:\n soc = LinuxSoC()\n builder = Builder(soc, output_dir=\"build\")\n builder.build()\n\n if args.load:\n from litex.build.openocd import OpenOCD\n prog = OpenOCD(\"openocd/openocd_xilinx.cfg\")\n prog.load_bitstream(\"build/gateware/top.bit\")\n\n if args.flash:\n flash_regions = {\n \"build/gateware/top.bin\": \"0x00000000\", # FPGA image: automatically loaded at startup\n \"binaries/Image\": \"0x00400000\", # Linux Image: copied to 0xc0000000 by bios\n \"binaries/rootfs.cpio\": \"0x00800000\", # File System: copied to 0xc2000000 by bios\n \"binaries/rv32.dtb\": \"0x00f00000\", # Device tree: copied to 0xc3000000 by bios\n 
\"emulator/emulator.bin\": \"0x00f80000\", # MM Emulator: copied to 0x20000000 by bios\n }\n from litex.build.openocd import OpenOCD\n prog = OpenOCD(\"openocd/openocd_xilinx.cfg\",\n flash_proxy_basename=\"openocd/bscan_spi_xc7a35t.bit\")\n prog.set_flash_proxy_dir(\".\")\n for filename, base in flash_regions.items():\n base = int(base, 16)\n print(\"Flashing {} at 0x{:08x}\".format(filename, base))\n prog.flash(base, filename)\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.596360981464386, "alphanum_fraction": 0.7053857445716858, "avg_line_length": 38.94767379760742, "blob_id": "b1e0367ea4ddc49334242b5674d11da82e1fce0a", "content_id": "a36bcac639c84b1d46889bc276e6a4204abbab39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6870, "license_type": "no_license", "max_line_length": 221, "num_lines": 172, "path": "/README.md", "repo_name": "daveshah1/linux-on-litex-vexriscv", "src_encoding": "UTF-8", "text": "# Experiments with Linux on LiteX-VexRiscv\n\n> **Note:** Tested on Ubuntu 18.04.*\n\n## Demo:\nhttps://asciinema.org/a/WfNA99RCdVi8kTPfzNTeoMTtY :)\n\n## Installing LiteX\n```sh\n$ wget https://raw.githubusercontent.com/enjoy-digital/litex/master/litex_setup.py\n$ ./litex_setup.py init install --user\n```\n## Installing a RISC-V toolchain\n```sh\n$ wget https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-20171231-x86_64-linux-centos6.tar.gz\n$ tar -xvf riscv64-unknown-elf-gcc-20171231-x86_64-linux-centos6.tar.gz\n$ export PATH=$PATH:$PWD/riscv64-unknown-elf-gcc-20171231-x86_64-linux-centos6/bin/\n```\n## Installing Verilator (only needed for simulation)\n```sh\n$ apt install verilator\n$ apt install libevent-dev libjson-c-dev\n```\n## Installing OpenOCD (only needed for hardware test)\n```sh\n$ git clone https://github.com/ntfreak/openocd.git\n$ cd openocd\n$./bootstrap\n$./configure --enable-ftdi\n$make\n$sudo make install\n```\n\n## Running the LiteX simulation\n```sh\n$ ./sim.py\n```\nYou should see Linux booting and be able to interact with it:\n```\n __ _ __ _ __\n / / (_) /____ | |/_/\n / /__/ / __/ -_)> <\n /____/_/\\__/\\__/_/|_|\n\n (c) Copyright 2012-2019 Enjoy-Digital\n (c) Copyright 2012-2015 M-Labs Ltd\n\n BIOS built on May 2 2019 18:58:54\n BIOS CRC passed (97ea247b)\n\n--============ SoC info ================--\nCPU: VexRiscv @ 1MHz\nROM: 32KB\nSRAM: 4KB\nMAIN-RAM: 131072KB\n\n--========= Peripherals init ===========--\n\n--========== Boot sequence =============--\nBooting from serial...\nPress Q or ESC to abort boot completely.\nsL5DdSMmkekro\nTimeout\nExecuting booted program at 0x20000000\n--============= Liftoff! ===============--\nVexRiscv Machine Mode software built May 3 2019 19:33:43\n--========== Booting Linux =============--\n[ 0.000000] No DTB passed to the kernel\n[ 0.000000] Linux version 5.0.9 (florent@lab) (gcc version 8.3.0 (Buildroot 2019.05-git-00938-g75f9fcd0c9)) #1 Thu May 2 17:43:30 CEST 2019\n[ 0.000000] Initial ramdisk at: 0x(ptrval) (8388608 bytes)\n[ 0.000000] Zone ranges:\n[ 0.000000] Normal [mem 0x00000000c0000000-0x00000000c7ffffff]\n[ 0.000000] Movable zone start for each node\n[ 0.000000] Early memory node ranges\n[ 0.000000] node 0: [mem 0x00000000c0000000-0x00000000c7ffffff]\n[ 0.000000] Initmem setup node 0 [mem 0x00000000c0000000-0x00000000c7ffffff]\n[ 0.000000] elf_hwcap is 0x1100\n[ 0.000000] Built 1 zonelists, mobility grouping on. 
Total pages: 32512\n[ 0.000000] Kernel command line: mem=128M@0x40000000 rootwait console=hvc0 root=/dev/ram0 init=/sbin/init swiotlb=32\n[ 0.000000] Dentry cache hash table entries: 16384 (order: 4, 65536 bytes)\n[ 0.000000] Inode-cache hash table entries: 8192 (order: 3, 32768 bytes)\n[ 0.000000] Sorting __ex_table...\n[ 0.000000] Memory: 119052K/131072K available (1957K kernel code, 92K rwdata, 317K rodata, 104K init, 184K bss, 12020K reserved, 0K cma-reserved)\n[ 0.000000] SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=1, Nodes=1\n[ 0.000000] NR_IRQS: 0, nr_irqs: 0, preallocated irqs: 0\n[ 0.000000] clocksource: riscv_clocksource: mask: 0xffffffffffffffff max_cycles: 0x114c1bade8, max_idle_ns: 440795203839 ns\n[ 0.000155] sched_clock: 64 bits at 75MHz, resolution 13ns, wraps every 2199023255546ns\n[ 0.001515] Console: colour dummy device 80x25\n[ 0.008297] printk: console [hvc0] enabled\n[ 0.009219] Calibrating delay loop (skipped), value calculated using timer frequency.. 150.00 BogoMIPS (lpj=300000)\n[ 0.009919] pid_max: default: 32768 minimum: 301\n[ 0.016255] Mount-cache hash table entries: 1024 (order: 0, 4096 bytes)\n[ 0.016802] Mountpoint-cache hash table entries: 1024 (order: 0, 4096 bytes)\n[ 0.044297] devtmpfs: initialized\n[ 0.061343] clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 7645041785100000 ns\n[ 0.061981] futex hash table entries: 256 (order: -1, 3072 bytes)\n[ 0.117611] clocksource: Switched to clocksource riscv_clocksource\n[ 0.251970] Unpacking initramfs...\n[ 2.005474] workingset: timestamp_bits=30 max_order=15 bucket_order=0\n[ 2.178440] Block layer SCSI generic (bsg) driver version 0.4 loaded (major 254)\n[ 2.178909] io scheduler mq-deadline registered\n[ 2.179271] io scheduler kyber registered\n[ 3.031140] random: get_random_bytes called from init_oops_id+0x4c/0x60 with crng_init=0\n[ 3.043743] Freeing unused kernel memory: 104K\n[ 3.044070] This architecture does not have kernel memory protection.\n[ 3.044472] Run /init as init process\nmount: mounting tmpfs on /dev/shm failed: Invalid argument\nmount: mounting tmpfs on /tmp failed: Invalid argument\nmount: mounting tmpfs on /run failed: Invalid argument\nStarting syslogd: OK\nStarting klogd: OK\nInitializing random number generator... [ 4.374589] random: dd: uninitialized urandom read (512 bytes read)\ndone.\nStarting network: ip: socket: Function not implemented\nip: socket: Function not implemented\nFAIL\n\n\nWelcome to Buildroot\nbuildroot login: root\nlogin[48]: root login on 'hvc0'\n# help\nBuilt-in commands:\n------------------\n\t. : [ [[ alias bg break cd chdir command continue echo eval exec\n\texit export false fg getopts hash help history jobs kill let\n\tlocal printf pwd read readonly return set shift source test times\n\ttrap true type ulimit umask unalias unset wait\n#\n#\n```\n\n## Running on hardware with the Digilent Arty board\nTo build the target, you will need to install Vivado and run:\n```sh\n$ ./arty.py --build\n```\n**The bitstream used for the demo is also provided ( *build/gateware/top.bit/bin*) if you don't want to rebuild it.**\n\nThe board will load the kernel binaries over TFTP from 192.168.1.100. You need to copy the files in *binaries* directory and *emulator/emulator.bin* to your TFTP root directory. 
Once done, you can load the bitstream with:\n```sh\n$ ./arty.py --load\n```\nYou can also flash the binaries to the SPI Flash of the board and directly boot from it with (**this is the recommended way if you don't want to set up a TFTP server**):\n```sh\n$ ./arty.py --flash\n```\nOpen your prefered terminal or use lxterm:\n```sh\n$ lxterm /dev/ttyUSBX\n```\nAnd you should see the BIOS prompt and Linux booting :)\n\n## Generating the Linux binaries (optional)\n```sh\n$ git clone http://github.com/buildroot/buildroot\n$ cd buildroot\n$ cp -r ../linux-litex-vexriscv/buildroot/* ./\n$ make litex_vexriscv_defconfig\n$ make\n```\nThe binaries are located in *output/images/*.\n\n## Generating the VexRiscv Linux variant (optional)\nInstall VexRiscv requirements: https://github.com/enjoy-digital/VexRiscv-verilog#requirements\n\nClone VexRiscv repository and generate the Linux variant:\n```sh\n$ git clone http://github.com/enjoy-digital/Vexriscv-verilog --recursive\n$ sbt \"runMain vexriscv.GenCoreDefault --externalInterruptArray=true --csrPluginConfig=linux-minimal\"\n```\nThe Linux variant is the *VexRiscv.v* file." } ]
num_files: 2
repo_name: vishal-pandey/ensembl
repo_url: https://github.com/vishal-pandey/ensembl
snapshot_id: 7de04cf3e78ede064196fa07c70e7b72b0c1565c
revision_id: 09411aa856774e67a38af35efee4cabe5e5ba2c3
directory_id: a1be91afc19e67a5eece284fe7bff8c0ec03d7c1
branch_name: refs/heads/master
visit_date: 2020-04-30T06:43:24.097023
revision_date: 2019-03-20T11:31:13
committer_date: 2019-03-20T11:31:13
github_id: 176,660,635
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5930131077766418, "alphanum_fraction": 0.6087336540222168, "avg_line_length": 25.022727966308594, "blob_id": "4ec2e1eadc5ded682722b7a412bf72d868a5a73e", "content_id": "2841bd8690b0c93a8dc02981db649bd08c6e0a41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1145, "license_type": "no_license", "max_line_length": 104, "num_lines": 44, "path": "/ftp/views.py", "repo_name": "vishal-pandey/ensembl", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom ftplib import FTP\nimport json\n\ndef index(request):\n\tftp = FTP('ftp.ensembl.org')\n\tftp.login()\n\tftp.cwd('pub/release-95')\n\tpath = \".\"+str(request.GET['path'])\n\tftp.cwd(path)\n\tdata = []\n\tftp.dir(data.append)\n\n\tfiles = 0\n\tdirs = 0\n\n\toutput = []\n\tfor x in data:\n\t\ty = x.split()\n\t\tconnector = '/'\n\t\tif request.GET['path'] == '/' or request.GET['path'][-1] == '/':\n\t\t\tconnector = ''\n\t\tvdict = {}\n\t\tif x[0] == '-':\n\t\t\tvdict['url'] = 'ftp://ftp.ensembl.org/pub/release-95'+str(request.GET['path'])+connector+str(y[8])\n\t\t\tvdict['type'] = 'file'\n\t\t\tfiles = files+1\n\t\telif x[0] == 'd':\n\t\t\tvdict['url'] = 'https://ensembl.vishalpandey.xyz/?path='+str(request.GET['path'])+connector+str(y[8])\n\t\t\tvdict['type'] = 'dir'\n\t\t\tdirs = dirs+1\n\t\tvdict['size'] = int(y[4])\n\t\tvdict['date_modified'] = y[5]+\" \"+y[6]+\" \"+y[7]\n\t\tvdict['name'] = str(y[8])\n\t\t\n\t\toutput.append(vdict)\n\n\tresult = {}\n\tresult['self'] = 'https://ensembl.vishalpandey.xyz/?path='+str(request.GET['path'])\n\tresult['files'] = files\n\tresult['dirs'] = dirs\n\tresult['data'] = output\n\treturn HttpResponse(json.dumps(result))\n" } ]
num_files: 1
repo_name: igor376/Web-Calendar
repo_url: https://github.com/igor376/Web-Calendar
snapshot_id: 7c7113f844e515113384ebba921bf95d88b41047
revision_id: a947142f066a020759bf3fb0322e904ab04c26f9
directory_id: a6531346ce9b2cd58d5aa78d88a182ee91455742
branch_name: refs/heads/master
visit_date: 2023-07-20T20:02:48.006732
revision_date: 2021-09-07T10:41:32
committer_date: 2021-09-07T10:41:32
github_id: 403,936,055
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.7519999742507935, "alphanum_fraction": 0.7519999742507935, "avg_line_length": 60.75, "blob_id": "599796c5902f9c6513bba406cd3e3be53b8b75f4", "content_id": "f3efc67a83e2b0578314221f18dbe9a5e558af0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 250, "license_type": "no_license", "max_line_length": 112, "num_lines": 4, "path": "/Web Calendar/task/README.md", "repo_name": "igor376/Web-Calendar", "src_encoding": "UTF-8", "text": "This is simple web-notes that works with REST API and saves notes to the database.\nYou can get(today's, all, by id), add, remove, notes. Also, it's possible get events in the selected time range.\n\nI used: Flask-RESTful, Flask-SQLAlchemy and Flask\n\n\n\n" }, { "alpha_fraction": 0.5955541729927063, "alphanum_fraction": 0.5970978736877441, "avg_line_length": 29.847618103027344, "blob_id": "0b768b0d97e936a1ae9560cc84741f9f0e54858c", "content_id": "5325553982e8c94d2c61fe7f7454da6baa193b3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3239, "license_type": "no_license", "max_line_length": 121, "num_lines": 105, "path": "/Web Calendar/task/app.py", "repo_name": "igor376/Web-Calendar", "src_encoding": "UTF-8", "text": "from flask import Flask, abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_restful import Api, Resource, reqparse, inputs, fields, marshal_with\nfrom datetime import date\nimport sys\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///notes.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\napi = Api(app)\n\nevents_json = {\n \"id\": fields.Integer,\n \"event\": fields.String,\n \"date\": fields.String,\n \"start_time\": fields.String,\n \"end_time\": fields.String\n}\n\n\nclass EventsDb(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n event = db.Column(db.VARCHAR, nullable=False)\n date = db.Column(db.Date, nullable=False)\n\n def __repr__(self):\n return f'id={self.id} {self.date} events=\"{self.event}\"'\n\n\ndb.create_all()\n\n\nclass Events(Resource):\n @marshal_with(events_json)\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"start_time\", type=inputs.date)\n parser.add_argument(\"end_time\", type=inputs.date)\n args = parser.parse_args()\n if args[\"start_time\"] is not None and args[\"end_time\"] is not None:\n events = EventsDb.query.all()\n answer = []\n for event in events:\n if args[\"start_time\"].strftime(\"%Y-%m-%d\") <= str(event.date) <= args[\"end_time\"].strftime(\"%Y-%m-%d\"):\n answer.append(event)\n return answer\n else:\n events = EventsDb.query.all()\n return events\n\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('event', type=str, help='The event name is required!', required=True)\n parser.add_argument('date', type=inputs.date,\n help='The event date with the correct format is required! 
The correct format is YYYY-MM-DD!',\n required=True)\n args = parser.parse_args()\n db.session.add(EventsDb(event=args[\"event\"], date=args[\"date\"]))\n db.session.commit()\n return {\n \"message\": \"The event has been added!\",\n \"event\": f'{args[\"event\"]}',\n \"date\": f'{date.strftime(args[\"date\"], \"%Y-%m-%d\")}'\n }\n\n\nclass EventsToday(Resource):\n @marshal_with(events_json)\n def get(self):\n events = EventsDb.query.filter_by(date=date.today()).all() #\n return events\n\n\nclass EventById(Resource):\n @marshal_with(events_json)\n def get(self, event_id):\n event = get_events(event_id)\n return event\n\n def delete(self, event_id):\n event = get_events(event_id)\n db.session.delete(event)\n db.session.commit()\n return {\n \"message\": \"The event has been deleted!\"\n }\n\n\ndef get_events(event_id):\n event = EventsDb.query.filter_by(id=event_id).first()\n if event is None:\n abort(404, \"The event doesn't exist!\")\n return event\n\n\napi.add_resource(Events, '/event')\napi.add_resource(EventsToday, '/event/today')\napi.add_resource(EventById, '/event/<int:event_id>')\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n arg_host, arg_port = sys.argv[1].split(':')\n app.run(host=arg_host, port=arg_port)\n else:\n app.run()\n" } ]
num_files: 2
repo_name: Mengjiao926/GitHub_code
repo_url: https://github.com/Mengjiao926/GitHub_code
snapshot_id: cc8d0e2c9221de70ab9081be165c2c90b3e78c02
revision_id: c609f7586d69e1ad1a1f5520c0e24b9724017e1e
directory_id: 658804ec91cf95a85e0a1c8fd828b2b835eba383
branch_name: refs/heads/master
visit_date: 2020-03-13T05:32:23.311634
revision_date: 2018-04-18T09:51:51
committer_date: 2018-04-18T09:51:51
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
[ { "alpha_fraction": 0.5130242705345154, "alphanum_fraction": 0.5682119131088257, "avg_line_length": 31.165876388549805, "blob_id": "7b593d6ed047286f0bf4ca2f7408246d833a1533", "content_id": "2264a600c4eb9e48e67c2f92f9ffec995af0e066", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7197, "license_type": "no_license", "max_line_length": 158, "num_lines": 211, "path": "/Spider/spider_imge/main.py", "repo_name": "Mengjiao926/GitHub_code", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\"\"\"\nCreate on 2018/3/27\n@author:chenglei\n\n\"\"\"\nimport os\nimport re\nimport os, re, time, random, socket, urllib.request,datetime\nfrom lxml import etree\nfrom os.path import dirname\nfrom itertools import product\n\nBASEPATH = dirname(os.path.abspath(__file__)).replace('\\\\', '/')\n\ndef visit_url(url):\n user_agents = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.43 BIDUBrowser/6.x Safari/537.31',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.44 Safari/537.36 OPR/24.0.1558.25 (Edition Next)',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36 OPR/23.0.1522.60 (Edition Campaign 54)'\n 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36',\n 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19',\n 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0',\n 'Mozilla/5.0 (Android; Mobile; rv:14.0) Gecko/14.0 Firefox/14.0',\n 'Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19',\n 'Mozilla/5.0 (Linux; U; Android 4.0.4; en-gb; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30'\n ]\n user_agent = random.choice(user_agents)\n headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Connection': 'keep-alive',\n 'User-Agent': user_agent\n }\n req = urllib.request.Request(url, headers=headers, method='GET')\n response = urllib.request.urlopen(req)\n html = response.read()\n response.close() \n return html.decode('utf-8')#, 'ignore')\n\n\ndef url_math(url):\n #网页上获取到的url不完成,需要对其进行拼\n if len(url)>0:\n full_url='https://www.chunyuyisheng.com'+url[0]\n else:\n full_url='https://www.chunyuyisheng.com'\n \n return full_url\n \n \n\ndef url_math1(url):\n #网页上获取到的url不完成,需要对其进行拼\n full_url='https://www.chunyuyisheng.com'+url\n return full_url\n \n\n \ndef get_next_url(html):\n # 获取下一页面url\n e_HTML=etree.HTML(html)\n page_next_url=e_HTML.xpath('//a[@class=\"next\"]/@href')\n return page_next_url\n \n\n\n\ndef get_all_url(html):\n #获取当前页面所有Url\n e_HTML=etree.HTML(html)\n e_HTML1=e_HTML.xpath('//div[@class=\"doctor-list\"]')\n for html in e_HTML1:\n list_url=html.xpath('div/div[1]/a/@href')\n return list_url\n\ndef get_path(content):\n content=content.strip()\n if re.findall('湿疹',content):\n save_path=BASEPATH+'/湿疹'\n mk_dir(save_path)\n \n elif re.findall('痤疮',content):\n save_path=BASEPATH+'/痤疮'\n mk_dir(save_path)\n \n elif 
re.findall('胎记',content):\n save_path=BASEPATH+'/胎记'\n mk_dir(save_path)\n \n elif re.findall('蒙古斑',content):\n save_path=BASEPATH+'/蒙古斑'\n mk_dir(save_path)\n else:\n save_path=BASEPATH+'/其他'\n mk_dir(save_path)\n return save_path\n\n\ndef mk_dir(SAVEPATH):\n if not os.path.exists(SAVEPATH):\n os.mkdir(SAVEPATH)\n\n\ndef get_huati_more_url(url):\n #返回医生话题更多url\n html=visit_url(url)\n time.sleep(2)\n html=etree.HTML(html)\n url=html.xpath('//a[@class=\"more\"]/@href')\n full_url=url_math(url)\n return full_url \n \n \ndef get_huti_urllist(html):\n #xpath获取更多链接 \n html=etree.HTML(html)\n for html1 in html.xpath('/html/body/div[4]/div[2]'): \n list_url=html1.xpath('div/div[1]/a/@href')\n list_content=html1.xpath('div/div[1]/a/text()') \n return dict(zip(list_url,list_content))\n\n\ndef get_huati_main(url):\n #循环获取话题url链接,且进行访问\n #正则表达式获取图片url数据\n page_next=url\n flag=True\n while flag==True:\n html1=visit_url(page_next)\n print(page_next)\n huti_all=get_huti_urllist(html1)\n for huati_url in huti_all: \n path=get_path(huti_all[huati_url])\n huati_url=url_math1(huati_url)\n print('话题url:'+huati_url)\n html=visit_url(huati_url)\n time.sleep(2)\n try:\n get_image(html,path) \n except:\n pass\n page_next= get_next_url(html1)\n if len(page_next)==0:\n flag=False\n page_next=url_math(page_next) \n time.sleep(5) \n \n \n \n \n \n \ndef get_image(html,path):\n #获取图片url并且进行保存\n p=r'<img src=\"(https.*?)\"'\n html_list=re.findall(p,html)\n if len(html_list)>0:\n print('已获取到该网页所有图片')\n for im_url in html_list:\n save_image(im_url,path)\n else:\n print('该网页没有图片数据')\n \n \n \ndef save_image(url,path): \n #保存图片到指定文件夹下面\n #https://r.sinaimg.cn/large/article/b4166b1244113c5afb9cca3853693f58.png\n imgurl=url\n if len(re.findall('\\.png',imgurl))==0:\n if len(re.findall('\\.jpg',imgurl))==0:\n p = r'[A-Za-z0-9]+'\n a = re.findall(p, imgurl)\n urllib.request.urlretrieve(imgurl, path + '/' + a[-1]+'.jpg')\n else:\n p = r'[A-Za-z0-9]+\\.jpg'\n a = re.findall(p, imgurl)\n print(a,imgurl)\n urllib.request.urlretrieve(imgurl, path + '/' + a[0])\n\n \ndef main(url):\n #解析当前页面Url\n page_next=url\n flag=True\n while flag==True:\n html=visit_url(page_next)\n doctor_list=get_all_url(html) \n for dortor_url in doctor_list:\n #获取话题链接 \n try:\n \n dortor_url=url_math1(dortor_url)\n print('医生url:'+dortor_url)\n talk_url=get_huati_more_url(dortor_url) \n get_huati_main(talk_url)\n \n except:\n print('抛出异常情况')\n page_next= get_next_url(html)\n if len(page_next)==0:\n break;\n page_next=url_math(page_next) \n \nif __name__ == '__main__':\n \n\t#获取所有医生下的图片\t\n #main('https://www.chunyuyisheng.com/pc/search/doctors/?query=%E7%9A%AE%E8%82%A4%E7%97%85')\n\t#获取一个医生下的图片,答辩做演示用。\n\t get_huati_main('https://www.chunyuyisheng.com/pc/topic/list/?doctor_id=clinic_web_176d72ee15105284')\n\t \n " }, { "alpha_fraction": 0.5956735014915466, "alphanum_fraction": 0.6110615730285645, "avg_line_length": 29.216217041015625, "blob_id": "ee0995f7350e15f2c20d08ef707389dc1edd9235", "content_id": "c79841bae4e9c26ba2fa590ab3f99e43fb164c34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5048, "license_type": "no_license", "max_line_length": 123, "num_lines": 148, "path": "/NLP/Keywrod/main.py", "repo_name": "Mengjiao926/GitHub_code", "src_encoding": "UTF-8", "text": "from textrank4zh import TextRank4Keyword\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom os.path import dirname\nimport numpy as np\nimport pandas as 
pd\nimport os\nimport time\n\nBASEPATH = dirname(os.path.abspath(__file__)).replace('\\\\', '/')\nstop_words=BASEPATH+'/english.txt'\nstop_list=[]\nf=open(stop_words,mode='r')\nfor row in f.readlines():\n stop_list.append(row.strip())\n \n\t \n######\n# author: leicheng,此处作者请更换为自己的名字\n# create_time 20180327\n####\n\n\ndef textrank(text):\n # text 传入为字符串形式\n # textrank提取摘要 \n \n word=TextRank4Keyword(stop_words_file=stop_words)\n word.analyze(text,window=5,lower=True)\n wor_list=word.get_keywords(num=5,word_min_len=1)\n return wor_list \n\ndef tf_idf(text): \n # text 传入为数组形式 ['this is content']\n vectorizer = CountVectorizer(stop_words=stop_list)\n X = vectorizer.fit_transform(text)\n #获取词袋中所有文本关键词\n word = vectorizer.get_feature_names() \n transformer = TfidfTransformer()\n #将词频矩阵X统计成TF-IDF值\n tfidf = transformer.fit_transform(X)\n weight = tfidf.toarray() # 将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重\n key={}\n for i in range(len(weight)): # 打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重\n for j in range(len(word)):\n key[word[j]]=weight[i][j]\n \n list=sorted(key.items(),key = lambda x:x[1],reverse = True)\n return list[0:5]\n\n\ndef read_row_data(read_path):\n \n return pd.read_excel(read_path,header=None)\n\n\ndef wirte_data(file_out,aline): \n file_out.write(aline)\n file_out.flush()\n\ndef mian():\n #原始数据地址一定要与代码存放地址保持一致\n read_path=BASEPATH+'/abstract.xlsx'\n df_data=read_row_data(read_path)\n #print(df_data.head(5))\n save_path=BASEPATH+'/abstract_out.xlsx'\n file_out=open(save_path,mode='w')\n list=[]\n for row in df_data.iterrows():\n \n contens=row[1][0]\n contents1=contens.split('AB -')\n content=[]\n content.append(contents1[1])\n key=textrank(contents1[1])\n key1=tf_idf(content)\n words=[]\n weigths=[]\n for row1 in range(len(key)):\n word=key[row1]['word']\n weigth=key[row1]['weight']\n \n words.append(word) \n weigths.append(str(weigth))\n \n alone=','.join(words) \n weigth1=','.join(weigths)\n \n \n \n words=[]\n weigths=[]\n for row1 in range(len(key1)):\n word=key1[row1][0]\n weigth=key1[row1][1]\n \n words.append(word)\n weigths.append(str(weigth))\n \n alone1=','.join(words)\n weigth2=','.join(weigths)\n \n \n #aline=format('\"%s\"|\"%s\"|\"%s\" \\n' %(contens,alone,alone1))\n #print(weigth1,weigth2)\n list.append([contens,alone,weigth1,alone1,weigth2])\n #wirte_data(file_out,aline)\n df=pd.DataFrame(list,columns=['stract','textrank_keyword','textrank_weight','tf_idf_keyword','tf_idf_weight'])\n df.to_excel(save_path)\n \n \n\nif __name__ == '__main__':\n mian()\n \n read_path=BASEPATH+'/abstract.xlsx'\n df_data=read_row_data(read_path)\n save_path=BASEPATH+'/abstract_out1.xlsx'\n crops=[]\n for row in df_data.iterrows():\n contens=row[1][0]\n crops.append(contens) \n vectorizer=CountVectorizer(stop_words=stop_list)#该类会将文本中的词语转换为词频矩阵,矩阵元素a[i][j] 表示j词在i类文本下的词频 \n transformer=TfidfTransformer()#该类会统计每个词语的tf-idf权值 \n tfidf=transformer.fit_transform(vectorizer.fit_transform(crops))#第一个fit_transform是计算tf-idf,第二个fit_transform是将文本转为词频矩阵 \n word=vectorizer.get_feature_names()#获取词袋模型中的所有词语 \n weight=tfidf.toarray()#将tf-idf矩阵抽取出来,元素a[i][j]表示j词在i类文本中的tf-idf权重 \n df_list=[]\n for i in range(len(weight)):#打印每类文本的tf-idf词语权重,第一个for遍历所有文本,第二个for便利某一类文本下的词语权重 \n print (\"-------这里输出第\",i,u\"类文本的词语tf-idf权重------\") \n key={}\n for j in range(len(word)): \n key[word[j]]=weight[i][j]\n list=sorted(key.items(),key = lambda x:x[1],reverse = True)\n print(list[0:5][0][1])\n \n words=[]\n weigths=[]\n for row in range(len(list[0:5])):\n print(list[0:5][row][0])\n 
words.append(list[0:5][row][0])\n weigths.append(str(list[0:5][row][1]))\n alone1=','.join(words)\n weigth2=','.join(weigths) \n df_list.append([i,alone1,weigth2])\n \n df=pd.DataFrame(df_list,columns=['id','tf_idf_keyword','tf_idf_weight'])\n df.to_excel(save_path)\n\n \n \n\t\n\t" }, { "alpha_fraction": 0.5373431444168091, "alphanum_fraction": 0.5696357488632202, "avg_line_length": 31.670000076293945, "blob_id": "a4f4e52ead1330ec50f9a4ebe1073591896face0", "content_id": "713349b762a5cb95c4fbecc7042b3d8bd4e29ff2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6680, "license_type": "no_license", "max_line_length": 97, "num_lines": 200, "path": "/DL/CNN/卷积神经网络.py", "repo_name": "Mengjiao926/GitHub_code", "src_encoding": "UTF-8", "text": "##!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mping\nimport pandas as pd\nfrom scipy import misc\nimport os\nimport glob\nimport tensorflow as tf\nimport time\nimport numpy as np\n\n\nw=100\nh=100\nc=3\n\ndef readim(path):\n lena = mping.imread(path)\n l_shape=lena.shape\n print(l_shape)\n if len(l_shape)==3:\n lean1 = misc.imresize(lena, [100, 100, 3])\n lean2=True\n else:\n lean1=0\n lean2=False\n return lean1,lean2\n\n\ndef read_img():\n path = path = 'C:/Users/85242/Desktop/神经网络/图片数据/'\n cate = cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]\n imgs = []\n labels = []\n path1=[]\n for idx, folder in enumerate(cate):\n print(idx, folder)\n for im in glob.glob(folder + '/*.jpg'):\n print('reading the images:%s' % (im))\n img,flag = readim(im)\n if flag==True:\n imgs.append(img)\n labels.append(idx)\n path1.append(im)\n\n return path1,np.asarray(imgs,np.float32), np.asarray(labels,np.int32)\n\n\ndef data_hand(data,label):\n # 数据打乱\n num_example = data.shape[0]\n arr = np.arange(num_example)\n np.random.shuffle(arr)\n data = data[arr]\n label = label[arr]\n #数据拆分\n ratio = 0.8\n s = np.int(num_example * ratio)\n x_train = data[:s]\n y_train = label[:s]\n x_val = data[s:]\n y_val = label[s:]\n return x_train,y_train,x_val,y_val\n\ndef minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = np.arange(len(inputs))\n np.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batch_size]\n else:\n excerpt = slice(start_idx, start_idx + batch_size)\n yield inputs[excerpt], targets[excerpt]\n\n\nif __name__ == '__main__':\n\t\n x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')\n y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')\n # 第一个卷积层(200——>100)\n conv1 = tf.layers.conv2d(\n inputs=x,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # 第二个卷积层(50->25)\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # 第三个卷积层(25->12)\n conv3 = tf.layers.conv2d(\n inputs=pool2,\n filters=128,\n kernel_size=[3, 3],\n padding=\"same\",\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))\n pool3 = 
tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)\n\n # 第四个卷积层(12->6)\n\n conv4 = tf.layers.conv2d(\n inputs=pool3,\n filters=128,\n kernel_size=[3, 3],\n padding=\"same\",\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))\n pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)\n\n re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])\n\n # 全连接层\n dense1 = tf.layers.dense(inputs=re1,\n units=1024,\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))\n dense2 = tf.layers.dense(inputs=dense1,\n units=512,\n activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))\n dense2=tf.nn.dropout(dense2, 0.5)\n logits = tf.layers.dense(inputs=dense2,\n units=10,\n activation=None,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))\t\n\t\t \n loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)\n train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)\n correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)\n acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n n_epoch = 16\n batch_size = 64\n saver = tf.train.Saver()\n\n\n\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n path,data, label = read_img()\n num_example = data.shape[0]\n arr = np.arange(num_example)\n np.random.shuffle(arr)\n data = data[arr]\n label = label[arr]\n\n # 将所有数据分为训练集和验证集\n ratio = 0.8\n s = np.int(num_example * ratio)\n x_train = data[:s]\n y_train = label[:s]\n\n path_val=path[s:]\n x_val = data[s:]\n y_val = label[s:]\n\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n for epoch in range(1):\n start_time = time.time()\n train_loss, train_acc, n_batch = 0, 0, 0\n for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):\n #print(x_train_a, y_train_a)\n _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})\n train_loss += err;\n train_acc += ac;\n n_batch += 1\n print(\" train loss: %f\" % (train_loss / n_batch))\n print(\" train acc: %f\" % (train_acc / n_batch))\n save_path = saver.save(sess, \"C:/Users/85242/Desktop/神经网络/结果/save_net.ckpt\")\n\t\n\n val_loss, val_acc, n_batch = 0, 0, 0\n for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):\n err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})\n val_loss += err;\n val_acc += ac;\n n_batch += 1\n print(\" validation loss: %f\" % (val_loss / n_batch))\n print(\" validation acc: %f\" % (val_acc / n_batch))\n saver.save(sess, 'C:/Users/85242/Desktop/神经网络/结果/save_net.ckpt')\n sess.close()\n" }, { "alpha_fraction": 0.6127946376800537, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 27.612903594970703, "blob_id": "6bfb782ecc77e64f191c1a9aaff53be30d2a7a74", "content_id": "563251f44e44768279328fed965dc97e907eb480", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3882, "license_type": "no_license", "max_line_length": 91, "num_lines": 124, "path": "/ML/特征选择打分/运行主程序.py", "repo_name": "Mengjiao926/GitHub_code", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 6 06:48:20 2018\n\n@author: 85242\n\"\"\"\n\n#导入数据分析包\nimport 
numpy as np\nimport pandas as pd\nimport sklearn as sk\nfrom os.path import dirname\nimport os\nimport sys\n\n\n# 基础路径:即代码所在路径\nBASEPATH=str(os.getcwd()).replace('\\\\','/')\n\n\nf=open(BASEPATH+'/entry09.csv')\ndf=pd.read_csv(f,low_memory=False)\n\n#查看标签数据有哪些类别,且做数值转换\nlabe=df['266']\ncategory = pd.Categorical(labe)\ndf['266']=category.labels\n\ndf1=df.select_dtypes(include=['object'])\n\n\n#此部分为数据预处理部分\ndf1=df.select_dtypes(include=['object'])\nprint(df1.head(5))\ncolums=df1.columns\nprint(colums)\nfor colum in colums:\n \n if int(colum)<=86:\n labe=df1[colum]\n category = pd.Categorical(labe)\n df1[colum]=category.labels\n else:\n df1[colum]=df1[colum].replace('?',0)\n df1[colum]=df1[colum].astype(np.float64)\n\n#删除object数据类型 \nfor colum in colums:\n df.drop(colum,axis=1, inplace=True)\n \ndf=pd.concat([df1,df],axis=1)\nprint(df.head(5))\n \nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import recall_score \nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import classification_report\n#保存结果\nfile_out=open(BASEPATH+'/result2.csv',mode='w')\ndef save_result(rfr,x,y,type,cnt):\n \n if type==1:\n a=accuracy_score(y, rfr.predict(x) )\n b=recall_score(y, rfr.predict(x),average='micro')\n c=f1_score(y, rfr.predict(x),average='micro')\n d=classification_report(y, rfr.predict(x))\n aline=format('%s,%s,%s,%s,%s'%('信息熵',cnt,a,b,c))\n print(aline)\n file_out.write(aline+'\\n')\n file_out.flush()\n \n else: \n a=accuracy_score(y, rfr.predict(x) )\n b=recall_score(y, rfr.predict(x),average='micro')\n c=f1_score(y, rfr.predict(x),average='micro')\n d=classification_report(y, rfr.predict(x))\n aline=format('%s,%s,%s,%s,%s'%('fiter',cnt,a,b,c))\n print(aline)\n file_out.write(aline+'\\n')\n file_out.flush() \n \n \n#选择RF(随机森林)建立模型,并且对特征进行打分。随机森林打分的最终落脚点为CART树的基尼指数(基尼指数越高代表信息越确定),跟熵的性质差不多,熵是衡量信息的不确定性\nfrom sklearn.utils import shuffle\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\n\n#打乱数据集\ndf=shuffle(df)\n#获取x,y\nx= df.iloc[:,:248] \ny=df.iloc[:,248:249]\n\nrfr = RandomForestClassifier(random_state=0, n_estimators=2000, n_jobs=-1)\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0) \n\nnames=list(x.columns)\n\nclf = ExtraTreesClassifier(criterion='entropy')\nX_new = clf.fit(x, y)\nnames=sorted(zip(map(lambda x: round(x, 4), clf.feature_importances_), names),reverse=True)\n\nlist_names=[]\nfor row in range(len(names)):\n list_names.append(names[row][1])\n if ((row%20==0) or (row==247)) and (row>166):\n #信息熵训练模型\n row=row+1\n x_train1=x_train[list_names]\n x_test1=x_test[list_names]\n print(x_test1.shape) \n rfr.fit(x_train1,y_train) \n save_result(rfr,x_test1,y_test,1,row)\n #卡方验证输出top前N特征\n \n X_new = SelectKBest(chi2, k=row).fit_transform(x_train, y_train) \n X_new1 = SelectKBest(chi2, k=row).fit_transform(x_test, y_test)\n print(X_new1.shape)\n rfr.fit(X_new,y_train)\n save_result(rfr,X_new1,y_test,2,row)\n\n " } ]
4
myhololens/airtest-selenium
https://github.com/myhololens/airtest-selenium
c471752adb99250d97a96274f34d952115a8f8b9
64df46a8c7e258bc900f6e0daeb07b2b802ad7c3
de3fad89dea65e8302f94dca81905ce4455e7f30
refs/heads/master
2020-05-25T02:23:26.136459
2018-08-07T02:40:29
2018-08-07T02:40:29
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7024608254432678, "alphanum_fraction": 0.7136465311050415, "avg_line_length": 34.7599983215332, "blob_id": "bceb449fb830f3a6e12ba5a2a4a4366288fe5610", "content_id": "9339efcb606b68d51b1e2a9deb6c6399ae6c76b4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 894, "license_type": "permissive", "max_line_length": 109, "num_lines": 25, "path": "/setup.py", "repo_name": "myhololens/airtest-selenium", "src_encoding": "UTF-8", "text": "# coding=utf-8\nfrom setuptools import setup, find_packages\n\n\ndef parse_requirements(filename='requirements.txt'):\n \"\"\" load requirements from a pip requirements file. (replacing from pip.req import parse_requirements)\"\"\"\n lineiter = (line.strip() for line in open(filename))\n return [line for line in lineiter if line and not line.startswith(\"#\")]\n\n\nsetup(\n name='airtest-selenium',\n version='1.0.0',\n keywords=\"selenium, automation test, web ui automation\",\n description='Selenium with airtest test framework.',\n long_description='Selenium with airtest test framework. 2018 present by NetEase Games',\n packages=find_packages(),\n include_package_data=True,\n install_requires=parse_requirements(),\n license='Apache License 2.0',\n\n author='Netease Games',\n author_email='[email protected], [email protected]',\n url='https://github.com/AirtestProject/airtest-selenium',\n)\n" } ]
1
batmanav/ASPathInference
https://github.com/batmanav/ASPathInference
af6e6a2962f96061a8907532ec97586d7745b47b
d26b6c79e1658c9081055e4c22f03bb37f3554f0
265344ee25d3de4ed7c4e2048949443dd1489324
refs/heads/master
2021-01-18T16:45:34.588876
2017-02-15T11:52:20
2017-02-15T11:52:20
76,389,978
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6344537734985352, "avg_line_length": 25.5, "blob_id": "b5228e3c3d021bc56d977d752c0b39567eb8fb0b", "content_id": "bcc2e6acf50b9e42340c9b901c176fee9200a4c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 150, "num_lines": 18, "path": "/migrations/mysqlcaidarel.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "import MySQLdb\n\ndb = MySQLdb.connect(\"localhost\",\"root\",\"manav\",\"gao\")\n\nc = db.cursor()\n\n\nc.execute('CREATE TABLE caidarel (AS1 text, AS2 text, relationship int)')\ncol1 = 'AS1'\ncol2 = 'AS2'\nwith open(\"caidarel.txt\") as f:\n\tfor line in f:\n\t\ttemp = line.split(' ')\n\t\t# print temp[0], temp[1].strip('\\n')\n\t\tc.execute(\"INSERT INTO caidarel (AS1, AS2, relationship) VALUES ('{pp}', '{lol}', '{x}')\".format(pp=str(temp[0]), lol=str(temp[1]), x=str(temp[2])))\n\ndb.commit()\ndb.close()" }, { "alpha_fraction": 0.5846682190895081, "alphanum_fraction": 0.5949656963348389, "avg_line_length": 35.375, "blob_id": "eec8f799fb878cbc1b21a5315127c3e068838e55", "content_id": "c8a365522d95be59692f9953a629d91ca09238a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 874, "license_type": "no_license", "max_line_length": 102, "num_lines": 24, "path": "/generate_txt.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\nif len(sys.argv) < 3:\n print \"Usage: python generate_txt.py <from_folder> <to_folder>\"\n exit()\n\nfolderstart=sys.argv[1]\nfolderend=sys.argv[2]\n\nprint \"generating txt file for folder from \"+str(folderstart)+\" to \"+str(folderend)\n\nfor folder in range(int(folderstart), (int(folderend)+1)):\n\tfolder_path='./'+str(folder)\n\tfor file in os.listdir(folder_path):\n \tif file.endswith(\".bin\"):\n \tprint folder_path+' '+file\n\t file_no_ext = os.path.splitext(file)[0]\n\t file_path=str(folder)+'/'+file\n\t save_path=str(folder)+'/'+file_no_ext+'.txt'\n\t\t\t\t\t#cat RIB01.bin | ./../zebra-dump-parser/zebra-dump-parser.pl >RIB01.txt\n\t command = 'cat '+file_path+' | ./zebra-dump-parser/zebra-dump-parser.pl > '+save_path\n\t\t\t\t\tprint command\n\t os.system(command)\n\n" }, { "alpha_fraction": 0.703797459602356, "alphanum_fraction": 0.7088607549667358, "avg_line_length": 17.85714340209961, "blob_id": "66b742ce6a15eedddf14d7041848bb73f4d1c8bf", "content_id": "1c2d4f0501ffe886d04fb2f8b2f8d10dc975c6b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 49, "num_lines": 21, "path": "/pathinference.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "from dbmanager import DatabaseManager\nfrom utility import *\nimport sys\n\nif __name__ == '__main__':\n\n\tif len(sys.argv) < 2:\n\t\tprint \"Usage: python pathinference.py <prefix>\"\n\t\texit()\n\telse:\n\t\tprefix = sys.argv[1]\n\n\tprint \"Running for prefix: \", prefix\n\n\tallAS = baseAS(prefix)\n\tinitpath(prefix)\n\tfrequency(prefix)\n\tpathlength(prefix)\n\tpathinference(prefix, allAS)\n\n\t#generate best paths text file" }, { "alpha_fraction": 0.6098654866218567, "alphanum_fraction": 0.6300448179244995, "avg_line_length": 22.421052932739258, "blob_id": "f6ea65fab54d96ea8407524d4e767b8839dd7867", "content_id": "389f01f61fecac1b438a005601f6a0700e57d0f1", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 125, "num_lines": 19, "path": "/migrations/sortfile.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "import sqlite3\n\nconn = sqlite3.connect('../test.sqlite')\nc = conn.cursor()\n\nc.execute('CREATE TABLE test (prefix text, ASes text)')\ncol1 = 'prefix'\ncol2 = 'ASes'\nwith open(\"import.txt\") as f:\n\tfor line in f:\n\t\ttemp = line.split(' ', 1)\n\t\tprint temp[0], temp[1].strip('\\n')\n\t\tc.execute(\"INSERT INTO test (prefix, ASes) VALUES ('{pp}', '{lol}')\".format(pp=str(temp[0]), lol=str(temp[1].strip('\\n'))))\n\n# conn.commit()\n\n\nconn.commit()\nconn.close()\n\n" }, { "alpha_fraction": 0.5987654328346252, "alphanum_fraction": 0.6098765134811401, "avg_line_length": 24.3125, "blob_id": "fceb1db481aead647a1f7f4fc70fbdbc81d2216c", "content_id": "8f58254bbfa2a83dbe0ba64e5d9afd7cf172c978", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 82, "num_lines": 32, "path": "/decompress_ribs.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "import bz2\nimport sys\nimport os\n\nfrom os.path import basename\n\nif len(sys.argv) < 3:\n print \"Usage: python decompress_ribs.py <from_folder> <to_folder>\"\n exit()\n\nfolder='./'+sys.argv[1]\n\nfolderstart=sys.argv[1]\nfolderend=sys.argv[2]\n\n\nprint \"decompressing file for folder from \"+str(folderstart)+\" to \"+str(folderend)\n\nfor folder in range(int(folderstart), (int(folderend)+1)):\n\tfolder_path='./'+str(folder)\n\tfor file in os.listdir(folder_path):\n\t\tif file.endswith(\".bz2\"):\n\t \tprint file\n\n \t\tfile_no_ext = os.path.splitext(file)[0]\n\t \tfile_path=folder_path+'/'+file\n \t\tsave_path=folder_path+'/'+file_no_ext+'.bin'\n \t\tf = open(file_path, 'r')\n \t\tdata=f.read()\n \t\tout=bz2.decompress(data)\n \t\tfo = open(save_path,'w')\n \t\tfo.write(out)\n" }, { "alpha_fraction": 0.5702005624771118, "alphanum_fraction": 0.5730658769607544, "avg_line_length": 20.18181800842285, "blob_id": "62675e7cf2d96e5827f0fa51638371242d63f626", "content_id": "4f9780ec976cad50535e277dcbd6d6bc31423d6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 698, "license_type": "no_license", "max_line_length": 44, "num_lines": 33, "path": "/dbmanager.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "import sqlite3\nimport MySQLdb\n\nclass DatabaseManager(object):\n def __init__(self, db):\n self.conn = sqlite3.connect(db)\n self.conn.commit()\n self.cur = self.conn.cursor()\n\n def query(self, arg):\n self.cur.execute(arg)\n self.conn.commit()\n return self.cur\n\n def __del__(self):\n self.conn.close()\n\n\nclass MyDatabaseManager:\n\n dbc = (\"localhost\",\"root\",\"manav\",\"gao\")\n\n def __init__(self):\n self.db = MySQLdb.connect(*self.dbc)\n self.cursor = self.db.cursor()\n\n def query(self, arg):\n self.cursor.execute(arg)\n self.db.commit()\n return self.cursor\n\n def __del__(self):\n self.cursor.close()" }, { "alpha_fraction": 0.6671069860458374, "alphanum_fraction": 0.6789960265159607, "avg_line_length": 26, "blob_id": "91fe6c3bd749cdb239fb0d4f5cbb1b1b65befcd3", "content_id": "7257b553595d8ffcb660170b1182807cfac145e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 757, 
"license_type": "no_license", "max_line_length": 77, "num_lines": 28, "path": "/zebra-dump-parser/drop-stats/doit", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\n# set $format = 2 in zebra-dump-parser.pl.\n\nPATH=\".:$PATH\"\n\nwget -O data/drop http://www.spamhaus.org/drop/drop.lasso\nwget -O data/asn.tmp http://www.potaroo.net/bgp/iana/asn-ctl.txt \\\n && mv data/asn.tmp data/asn || true\n\nwget -c --progress=dot:mega -O data/bview.current.gz \\\n http://data.ris.ripe.net/rrc00/$(date +%Y.%m)/bview.$(date +%Y%m%d).0800.gz\nzcat data/bview.*.gz | nice zebra-dump-parser.pl > data/routes.tmp\nmv data/routes.tmp data/routes\nrm -f data/bview.*.gz data/routes.cache\n\nnice -n 19 \\\ndrop-stats --as-names=data/asn --drop=data/drop --routes=data/routes \\\n > data/drop-stats.txt.tmp\n\n{\n cat info.txt\n printf \"Generated on: \"\n date\n echo\n cat data/drop-stats.txt.tmp\n} > drop-stats.txt\nrm data/drop-stats.txt.tmp\n\n" }, { "alpha_fraction": 0.5847457647323608, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 29.13953399658203, "blob_id": "59913f1765896572825b239d61f81428ad6f2718", "content_id": "7aab978fc34ecfffffa72fc30e8816922dd1beac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 164, "num_lines": 43, "path": "/getribs.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "# Get rib files from http://archive.routeviews.org for the urls specifies in routeviews-urls.txt\nimport urllib\nimport urllib2\nimport requests\nimport os\n\n# Modify these 4 parameters to get RIBS for different time.\nmonth='07' # month to get ribs of.\nyear='2016' # year to get ribs of.\ntime='0600' # time of ribs collection\nDAY_OF_MONTH=20 # day of rib collection\n# url = 'http://archive.routeviews.org/route-views.kixp/bgpdata/2016.07/RIBS/rib.20160701.1200.bz2'\n\n\nDAY_STR=['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']\n\n# File containting URL of path collectors\nURLS = './routeviews-urls.list'\n\n\nwith open(URLS) as f:\n routeviewsurls = f.readlines()\n\nfolder_no=1;\n\nfor baseurl in routeviewsurls:\n\t\n\tfolder_path='./'+str(folder_no)+'/'\n\tfolder_no=folder_no+1\n\n\tif not os.path.exists(folder_path):\n\t\tos.makedirs(folder_path)\n\t\n\tbaseurl=baseurl.rstrip()\n\tribsurl=baseurl+'/'+year+'.'+month+'/RIBS/rib.'+year+month\n\n\tday=DAY_OF_MONTH\n\tdownloadurl=ribsurl+DAY_STR[day]+'.'+time+'.bz2'\n print \"dowloading \"+downloadurl+' ...'\n save_path=folder_path+'RIB'+DAY_STR[day]+'.bz2'\n urllib.urlretrieve(downloadurl, save_path)\n print 'done'\n print save_path+\"\\n\"\n\n\n" }, { "alpha_fraction": 0.603347659111023, "alphanum_fraction": 0.6246928572654724, "avg_line_length": 30.298076629638672, "blob_id": "b88c5890eb6cabb3f8242637731e0b58beeb7f54", "content_id": "1a6c04c19ef2570fa33f7cb270102368599da60f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6512, "license_type": "no_license", "max_line_length": 178, "num_lines": 208, "path": "/utility.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "from dbmanager import DatabaseManager, MyDatabaseManager\nfrom collections import Counter, deque\n\ndef baseAS(prefix):\n\t# db = DatabaseManager('test.sqlite')\n\tdb = MyDatabaseManager()\t\n\n\tallAS = set()\n\tresult 
= db.query('SELECT ASes FROM test WHERE prefix = \"%s\"' % (prefix))\n\tfor row in result:\n\t\tfor i in row[0].split():\n\t\t\tallAS.add(i)\n\treturn list(allAS)\n\ndef initpath(prefix):\n\t# db = DatabaseManager('test.sqlite')\t\n\tdb = MyDatabaseManager()\t\n\n\n\tdb.query('CREATE TABLE IF NOT EXISTS Manav (autos text, uncertainty int, frequency int, pathlength int, actualpath varchar(255) UNIQUE)') # % (prefix))\n\n \tinsertvalues = []\n\tresult = db.query('SELECT ASes FROM test WHERE prefix = \"%s\"' % (prefix))\n\tfor row in result:\n\t\tstartingpath = \" \".join(row[0].split()[::-1])\n\t\ttemp = startingpath.split(' ')\n\t\tx = 0\n\t\twhile x < len(temp):\n\t\t\tbuf = ' '.join(temp[:x+1])\n\t\t\t# insertvalues.append('INSERT OR IGNORE INTO \"%s\" VALUES (\"%s\", 0, 0, 0, \"%s\")' % (prefix, buf.split(' ')[-1], buf))\n\t\t\tinsertvalues.append('INSERT IGNORE INTO Manav VALUES (\"%s\", 0, 0, 0, \"%s\")' % (buf.split(' ')[-1], buf))\n\t\t\tx += 1\n\n\tfor i in insertvalues:\n\t\tdb.query(i)\n\ndef frequency(prefix):\n\t# db = DatabaseManager('test.sqlite')\n\tdb = MyDatabaseManager()\t\n\n\n\n\tresult = db.query('SELECT ASes FROM test WHERE prefix = \"%s\"' % (prefix))\n\n\trows = []\n\n\tfor row in result:\n\t\trows.append(row[0])\n\n\t# newresult = db.query('SELECT actualpath FROM \"%s\"' % (prefix))\n\tnewresult = db.query('SELECT actualpath FROM Manav') # % (prefix))\n\tallpaths = []\n\tfor row in newresult:\n\t\tallpaths.append(\" \".join(row[0].split(' ')[::-1]))\n\n\ttodo = []\n\n\tfor apaths in allpaths:\n\t\tfreq = 0\n\t\tfor path in rows:\n\t\t\ti = path.split(' ')\n\t\t\tj = apaths.split(' ')\n\t\t\tif not Counter(j) - Counter(i):\n\t\t\t\tfreq += 1\n\t\t# todo.append('UPDATE \"%s\" SET frequency = %d WHERE actualpath = \"%s\"' % (prefix, freq, \" \".join(apaths.split(' ')[::-1])))\n\t\ttodo.append('UPDATE Manav SET frequency = %d WHERE actualpath = \"%s\"' % (freq, \" \".join(apaths.split(' ')[::-1])))\n\n\tfor i in todo:\n\t\tdb.query(i)\n\ndef pathlength(prefix):\n\t# db = DatabaseManager('test.sqlite')\t\n\tdb = MyDatabaseManager()\t\n\n\t# result = db.query('SELECT actualpath FROM \"%s\"' % (prefix))\n\tresult = db.query('SELECT actualpath FROM Manav') # % (prefix))\n\ttodo = []\n\tfor row in result:\n\t\tpathl = len(row[0].split())\n\t\ttodo.append('UPDATE Manav SET pathlength = %d WHERE actualpath = \"%s\"' % (pathl, row[0]))\n\n\tfor i in todo:\n\t\tdb.query(i)\n\ndef pathinference(prefix, baseAS):\n\tq = deque(baseAS)\n\t# db = DatabaseManager('test.sqlite')\n\tdb = MyDatabaseManager()\t\n\n\twhile len(q) > 0:\n\t\tcurrent_as = q.popleft()\n\t\tpeers_of_current_as = getpeers(current_as)\n\n\t\tfor peer in peers_of_current_as:\n\n\t\t\tif peer in baseAS:\n\t\t\t\tcontinue\n\n\t\t\ttpath = SPF(current_as, prefix)\n\t\t\tpath = tpath[0]\n\t\t\tul = tpath[1]\n\t\t\tpl = tpath[2]\n\t\t\tfreq = tpath[3]\n\n\t\t\tvalleyfree = 0\n\t\t\t#Check between peer and path if valleyfree.\n\t\t\tif pl > 1:\n\t\t\t\ts1 = db.query('SELECT relationship from caidarel WHERE AS1 = \"%s\" and AS2 = \"%s\"' % (path.split()[-2], path.split()[-1]))\n\t\t\t\ts2 = db.query('SELECT relationship from caidarel WHERE AS2 = \"%s\" and AS1 = \"%s\"' % (path.split()[-2], path.split()[-1]))\n\t\t\t\trel = 0\n\t\t\t\tif s2 == -1:\n\t\t\t\t\trel = 1\n\t\t\t\telif s2 == 0 or s1 == 0:\n\t\t\t\t\trel = 2\n\t\t\t\telif s1 == -1:\n\t\t\t\t\trel = 0\n\n\t\t\t\tif rel == 0:\n\t\t\t\t\ts1 = db.query('SELECT relationship from caidarel WHERE AS1 = \"%s\" and AS2 = \"%s\"' % (peer, path.split()[-1]))\n\t\t\t\t\ts2 = 
db.query('SELECT relationship from caidarel WHERE AS2 = \"%s\" and AS1 = \"%s\"' % (peer, path.split()[-1]))\n\t\t\t\t\tif s1 == -1 or s1 == 0:\n\t\t\t\t\t\tvalleyfree = 1\n\t\t\t\t\telif s2 == -1 or s2 == 0:\n\t\t\t\t\t\tvalleyfree = 1\n\t\t\t\telif rel == 1:\n\t\t\t\t\ts1 = db.query('SELECT relationship from caidarel WHERE AS1 = \"%s\" and AS2 = \"%s\"' % (peer, path.split()[-1]))\n\t\t\t\t\tif s1 == -1:\n\t\t\t\t\t\tvalleyfree = 1\t\t\n\t\t\t\telif rel == 2:\n\t\t\t\t\ts1 = db.query('SELECT relationship from caidarel WHERE AS1 = \"%s\" and AS2 = \"%s\"' % (peer, path.split()[-1]))\n\t\t\t\t\tif s1 == -1:\n\t\t\t\t\t\tvalleyfree = 1\n\t\t\telse:\n\t\t\t\ts1 = db.query('SELECT relationship from caidarel WHERE AS1 = \"%s\" and AS2 = \"%s\"' % (peer, path.split()[-1]))\n\t\t\t\ts2 = db.query('SELECT relationship from caidarel WHERE AS2 = \"%s\" and AS1 = \"%s\"' % (peer, path.split()[-1]))\n\t\t\t\tif s2 == 0 or s2 == -1:\n\t\t\t\t\tvalleyfree = 1\n\t\t\t\tif s1 == 0 or s1 == -1:\n\t\t\t\t\tvalleyfree = 1\n\n\t\t\tif not valleyfree:\n\t\t\t\tcontinue\n\n\t\t\ttemp_best = SPF(peer, prefix)\n\n\t\t\tif temp_best[0] == -1:\n\t\t\t\tadd2q = 0\n\t\t\telse:\n\t\t\t\tadd2q = 1\n\n\t\t\tinserted = insertpath(path, peer, ul+1, pl+1, freq, prefix)\n\n\t\t\tif add2q == 1 and temp_best[0] != inserted and peer not in q and temp_best != -1:\n\t\t\t\tq.append(peer)\n\ndef getpeers(AS):\n\tpeers = set()\n\t# db = DatabaseManager('test.sqlite')\n\tdb = MyDatabaseManager()\t\n\n\tresult = db.query('SELECT AS1 from caidarel WHERE AS2 = \"%s\"' % (AS))\n\tfor row in result:\n\t\tpeers.add(row[0])\n\tresult = db.query('SELECT AS2 from caidarel WHERE AS1 = \"%s\"' % (AS))\n\tfor row in result:\n\t\tpeers.add(row[0])\n\treturn peers\n\ndef insertpath(path, peer, uncertainty, pathlength, freq, prefix):\n\t# db = DatabaseManager('test.sqlite')\n\tdb = MyDatabaseManager()\t\n\n\tnewpath = str(path) + \" \" + peer\n\t# print peer, uncertainty, freq, pathlength, newpath\n\t# result = db.query('INSERT OR IGNORE INTO \"%s\" VALUES (\"%s\", %d, %d, %d, \"%s\")' % (prefix, peer, uncertainty, freq, pathlength, newpath))\n\tresult = db.query('INSERT IGNORE INTO Manav VALUES (\"%s\", %d, %d, %d, \"%s\")' % (peer, uncertainty, freq, pathlength, newpath))\n\treturn newpath\n\ndef SPF(AS, prefix):\n\t# db = DatabaseManager('test.sqlite')\n\tdb = MyDatabaseManager()\t\n\n\n\t# result = db.query('SELECT actualpath, uncertainty, pathlength, frequency FROM \"%s\" WHERE autos = \"%s\" ORDER BY pathlength, uncertainty DESC, frequency LIMIT 1' % (prefix, AS))\n\tresult = db.query('SELECT actualpath, uncertainty, pathlength, frequency FROM Manav WHERE autos = \"%s\" ORDER BY pathlength, uncertainty DESC, frequency LIMIT 1' % (AS))\n\tbestpath = []\n\tfor row in result:\n\t\tbestpath.append(row[0])\n\t\tbestpath.append(row[1])\n\t\tbestpath.append(row[2])\n\t\tbestpath.append(row[3])\n\t\treturn bestpath\n\telse:\n\t\treturn [-1]\n\ndef LUF(AS, prefix):\n\t# db = DatabaseManager('test.sqlite')\t\n\tdb = MyDatabaseManager()\n\tresult = db.query('SELECT actualpath, uncertainty, pathlength, frequency FROM \"%s\" WHERE autos = \"%s\" ORDER BY uncertainty DESC, frequency, pathlength LIMIT 1' % (prefix, AS))\n\tbestpath = []\n\tfor row in result:\n\t\tbestpath.append(row[0])\n\t\tbestpath.append(row[1])\n\t\tbestpath.append(row[2])\n\t\tbestpath.append(row[3])\n\t\treturn bestpath\n\telse:\n\t\treturn [-1]\n\n\n" }, { "alpha_fraction": 0.6177777647972107, "alphanum_fraction": 0.6333333253860474, "avg_line_length": 25.52941131591797, 
"blob_id": "1e371bcefb705caca73277c9cc918ecbc7ada70d", "content_id": "7ae037f30256eb0fa827e6fe5840f28a38f72d8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 450, "license_type": "no_license", "max_line_length": 125, "num_lines": 17, "path": "/migrations/mysqlsortfile.py", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "import MySQLdb\n\ndb = MySQLdb.connect(\"localhost\",\"root\",\"manav\",\"gao\" )\n\nc = db.cursor()\n\nc.execute('CREATE TABLE IF NOT EXISTS test (prefix text, ASes text)')\ncol1 = 'prefix'\ncol2 = 'ASes'\nwith open(\"import.txt\") as f:\n\tfor line in f:\n\t\ttemp = line.split(' ', 1)\n\t\tprint temp[0], temp[1].strip('\\n')\n\t\tc.execute(\"INSERT INTO test (prefix, ASes) VALUES ('{pp}', '{lol}')\".format(pp=str(temp[0]), lol=str(temp[1].strip('\\n'))))\n\ndb.commit()\ndb.close()" }, { "alpha_fraction": 0.7232645153999329, "alphanum_fraction": 0.7382739186286926, "avg_line_length": 24.987804412841797, "blob_id": "e81ddc3d431093edb41a1ac4e0405cc0838eb51e", "content_id": "26169dc0a4ec786748c3274d31339999e32eb2e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2132, "license_type": "no_license", "max_line_length": 135, "num_lines": 82, "path": "/README.md", "repo_name": "batmanav/ASPathInference", "src_encoding": "UTF-8", "text": "\n## AS Path Inference\n\n\n---------\n\n\n### Installation\n\n---------\nPrerequisites:\n\n - Python 2.7\n - sqlite3\n\n#####CAIDA AS Relationship Database:\n\nGet the as-rel file from: http://data.caida.org/datasets/as-relationships/serial-1/\nThe as-rel files contain p2p and p2c relationships. The format is:\n\n <provider-as>|<customer-as>|-1\n\n <peer-as>|<peer-as>|0\n\nTo import this file into the database, update the filename in the caidarel.py file in the migrations folder, and then run caidarel.py. \n\n\n#####RIBS:\nRIBS are taken from http://archive.routeviews.org/\nFiles:\n\n1. ./routeviews-urls.list : List of URL of collectors(currently 18) available at archive.routeviews.org\n\n2. ./getribs.py : \nUsage: python ./getribs.py\nGet rib files from http://archive.routeviews.org for the urls specifies in routeviews-urls.list\nModify time,day,year,month parameters to get RIB of that time. \nRIBS are downloaded in 18 folders named by their respective numbers.\n\n3. ./decompress_ribs.py :\nUsage: python decompress_ribs.py from_folder to_folder\nDecompress RIBS downloaded in folder beginning from_folder to to_folder\n\n4. ./generate_txt.py\nUsage: python generate_txt.py from_folder to_folder\nGenerated txt file of RIBS from decompressed RIBS beginning from_folder to to_folder\nNOTE: Needs ./zebra-dump-parser in directory.\n\n5. ./zebra-dump-parser\n\nSteps:\n\n1. Run python ./getribs.py\n\n2. Run python decompress_ribs.py from_folder to_folder. Decompression may take time. Divide tasks in small range of \nfrom_folder to_folder\n\n3. Run python generate_txt.py from_folder to_folder\n\n4. After this txt RIB files are created in respective folders. use cat to combine all files in a single file\neg. cat ./1/RIB21.txt ./2/RIB21.txt ... 
./18/RIB21.txt > ribout.txt\n\nAfter generating ribout.txt:\n\n- Run sortfile.py from migrations folder after changing to filename in sortfile.py\n\n\n### Usage\n\n---------\nClone the repo\n\nUsage: `python pathinference.py <prefix>`\n\nUsing directly from the database:\n\n- Load the database using: sqlite3 test.db\n- '.tables' to view all tables\n\n\n\n--------\nLink to the paper: http://rio.ecs.umass.edu/mnilpub/papers/aspath_tech.pdf\n" } ]
11
sirget/Introduction-to-data-analytic
https://github.com/sirget/Introduction-to-data-analytic
ed392154aa9755fd837e1d59a351c46c47609a75
d5cf87af8ab7d82a88dbe717d4cd359c10ec6958
b649279816bf1dd5490f56b5fada9adf02c0c584
refs/heads/main
2023-04-09T04:22:21.875630
2021-04-19T16:03:43
2021-04-19T16:03:43
330,696,961
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5168585777282715, "alphanum_fraction": 0.5707237124443054, "avg_line_length": 24.07216453552246, "blob_id": "f0d300eb4ae7e4309b412658b5fa01aafc1e76f0", "content_id": "addcd8a8bb80dfaa191ffe681d52e66c2873f39a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2432, "license_type": "no_license", "max_line_length": 88, "num_lines": 97, "path": "/GPS.py", "repo_name": "sirget/Introduction-to-data-analytic", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 13 14:51:28 2021\n\n@author: Pnach\n\"\"\"\nimport glob\nimport numpy as np\nimport pandas as pd\nimport folium\nimport webbrowser\nfrom folium import plugins\n\n\n\"\"\"\npath = \"2018-12-25\"\nall_files = glob.glob(path + \"/*.csv\")\ndfs = []\nfor filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n dfs.append(df)\nframe = pd.concat(dfs, axis=0, ignore_index=True)\n\"\"\"\n\n\ndef question_1():\n path = \"2018-12-25/2018-12-25.05.csv\"\n df = pd.read_csv(path)\n answer = len(df[\"vid\"].unique())\n print(answer)\n\n\ndef question_2():\n path = \"2018-12-25/2018-12-25.05.csv\"\n df = pd.read_csv(path)\n answer = df[[\"speed\", \"for_hire_light\"]\n ][df[\"speed\"] > 0][df[\"for_hire_light\"] == 0].mean()\n\n print(answer)\n\n\ndef question_3():\n path = \"2018-12-25/2018-12-25.05.csv\"\n df = pd.read_csv(path)\n answer = df[(df[\"vid\"] == \"bwmSJnFxrwrAUjqBcqCWOlj5f9Y\")\n & (df[\"for_hire_light\"] == 0)]\n answer = df[[\"timestamp\"]][df[\"vid\"] ==\n \"bwmSJnFxrwrAUjqBcqCWOlj5f9Y\"][df[\"for_hire_light\"] == 0]\n print(answer)\n\n\ndef question_4():\n path = \"2018-12-25\"\n all_files = glob.glob(path + \"/*.csv\")\n dfs = []\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n dfs.append(df)\n df = pd.concat(dfs, axis=0, ignore_index=True)\n answer = df[[\"speed\", \"for_hire_light\"]\n ][df[\"speed\"] > 0][df[\"for_hire_light\"] == 0].mean()\n print(answer)\n\n\ndef question_5():\n path = \"2018-12-25\"\n all_files = glob.glob(path + \"/*.csv\")\n dfs = []\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n dfs.append(df)\n big = pd.concat(dfs, axis=0, ignore_index=True)\n answer = big[(big[\"for_hire_light\"] == 0) &\n big[\"speed\"] != 0].groupby(\"vid\").max().mean()\n print(answer)\n\n\ndef question_6():\n path = \"2018-12-25\"\n all_files = glob.glob(path + \"/*.csv\")\n dfs = []\n taxi_in_hour = []\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n taxi_in_hour.append(len(df[\"vid\"].unique()))\n dfs.append(df)\n df = pd.concat(dfs, axis=0, ignore_index=True)\n answer = taxi_in_hour.index(max(taxi_in_hour))\n print(answer)\n\n\n# question_1()\n# question_2()\n# question_3()\n# question_4()\nquestion_5()\n# question_6()\n" }, { "alpha_fraction": 0.6399999856948853, "alphanum_fraction": 0.6866666674613953, "avg_line_length": 29.049999237060547, "blob_id": "799ee0a49fbecea4fb5dc3a897277eccfb529b31", "content_id": "ed62938906924b63fbe9ced9cb61541a4bfe4821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 600, "license_type": "no_license", "max_line_length": 182, "num_lines": 20, "path": "/A2.py", "repo_name": "sirget/Introduction-to-data-analytic", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\npath = 
\"https://docs.google.com/spreadsheets/u/1/d/e/2PACX-1vRIFbIywXecgxN5c1sMn_KYWsARoXgp4paFxu4qndPaX_47vRaOdrqmiHjtNZ9ZYQcv3ubMSv8DA9ta/pub?gid=1546938151&single=true&output=csv\"\n\ndf = pd.read_csv(path)\n\nprint(df)\n\n#df['extra_salary'] = df['salary'] * df['department'].apply(lambda x : 0.2 if x == 'developer' else 0.5)\n\n#print(len(df))\n\n#df['name_with_age'] = df['name'] + ('(') + df['age'].astype(str) + (')')\n\nprint(df.groupby(['gender','department']).agg({'salary':'mean'}).loc['female'].idxmax().values[0])\n\nprint(df['department'].unique())\n\nprint(df)" } ]
2
anshuman73/facematch
https://github.com/anshuman73/facematch
f6ac0bd94c796ddb1f3af911d0ecb6aff9c219ac
08cc5f58825a1c98db0742dff4503117f932addc
4015484d4b85735f7200f5c61526e7a082cca57f
refs/heads/master
2020-04-08T05:23:03.743935
2019-06-21T19:53:50
2019-06-21T19:53:50
159,058,084
6
1
null
null
null
null
null
[ { "alpha_fraction": 0.7169811129570007, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 22.31999969482422, "blob_id": "d4f6c9f1807c77b38090372172906854c6355e58", "content_id": "4bc338d3b75d81ce911dfc26cbfd329075e9f10b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 583, "license_type": "no_license", "max_line_length": 102, "num_lines": 25, "path": "/app.py", "repo_name": "anshuman73/facematch", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_admin import Admin\nimport face_match\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://postgres:No1cancrackthis@localhost:5432/facematch'\napp.config['UPLOAD_FOLDER'] = '/uploads'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'this_is_a_secret_key'\n\ndb = SQLAlchemy(app)\nadmin = Admin(app, name='FaceMatch', template_mode='bootstrap3')\n\n\nfrom views import *\nfrom models import *\nfrom admin import *\n\ndb.create_all()\n\n\nif __name__ == '__main__':\n app.run()\n" }, { "alpha_fraction": 0.7748251557350159, "alphanum_fraction": 0.7846153974533081, "avg_line_length": 41.05882263183594, "blob_id": "0261f828c27f4ef9cb3a0dc33cf1f106ed887264", "content_id": "78781e3cd204aedc70c0b096726095144fec420d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 715, "license_type": "no_license", "max_line_length": 185, "num_lines": 17, "path": "/README.md", "repo_name": "anshuman73/facematch", "src_encoding": "UTF-8", "text": "# FaceMatch\n\nA Facial Recognition based Attendance System made for academic institutions.\n\nThe software can clean your face data, and store the different faces in the database.\n\nOn uploading an image of the class, it automatically picks up the faces of the students prensent, and compares them with the faces in the database, and marks attendance for those found.\n\nAccuracy acheieved in tests - 94%\n\n\n## Instructions\n\nCreate two directories - /uploads and /faces. 
Save all processed, cleaned faces in the /faces directory.\nYou can use the face_recognition library for cleaning the facial data.\n\nSimply run the app.py file by using ```python3 app.py``` (Python 2 is not supported; the preferred version is CPython 3.6.7)\n" }, { "alpha_fraction": 0.5924264788627625, "alphanum_fraction": 0.5962464809417725, "avg_line_length": 39.13999938964844, "blob_id": "bd0220792888e372e8df8ed2064158d5c05fb542", "content_id": "789f47fc87cb3a309fe9af9da156865e670b20cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6021, "license_type": "no_license", "max_line_length": 112, "num_lines": 150, "path": "/views.py", "repo_name": "anshuman73/facematch", "src_encoding": "UTF-8", "text": "from app import app, db\nfrom flask import session, redirect, url_for, render_template, abort, request, flash\nfrom forms import LoginForm\nfrom models import Student, Teacher, Course, Class, Attendance\nimport os\nfrom werkzeug.utils import secure_filename\nfrom face_match import give_match\n\n\nALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']\n\n\[email protected]('/index', methods=['GET'])\[email protected]('/', methods=['GET'])\ndef index():\n    if session.get('logged_in') and session.get('username'):\n        if session.get('role') == 'teacher':\n            return redirect(url_for('teacher_dashboard'))\n        elif session.get('role') == 'student':\n            return redirect(url_for('student_dashboard'))\n    else:\n        return redirect(url_for('login'))\n\n\[email protected]('/login', methods=['GET'])\ndef login():\n    if session.get('logged_in') and session.get('username'):\n        return redirect(url_for('dashboard'))\n    else:\n        return render_template('main_login.html')\n\n\[email protected]('/login/<role>', methods=['GET', 'POST'])\ndef login_role(role):\n    form = LoginForm()\n    if form.validate_on_submit():\n        username, password = form.username.data, form.password.data\n        # TODO: Actually verify password\n        if role == 'student':\n            student = Student.query.filter_by(username=username).first()\n            if student:\n                session['logged_in'] = True\n                session['username'] = username\n                session['role'] = 'student'\n                return redirect(url_for('student_dashboard'))\n            else:\n                form.username.errors.append('Unknown username')\n                return render_template('student_login.html', form=form)\n        elif role == 'teacher':\n            teacher = Teacher.query.filter_by(username=username).first()\n            if teacher:\n                session['logged_in'] = True\n                session['username'] = username\n                session['role'] = 'teacher'\n                return redirect(url_for('teacher_dashboard'))\n            else:\n                form.username.errors.append('Unknown username')\n                return render_template('teacher_login.html', form=form)\n        else:\n            return abort(403)\n    else:\n        if role == 'student':\n            return render_template('student_login.html', form=form)\n        elif role == 'teacher':\n            return render_template('teacher_login.html', form=form)\n        else:\n            return abort(404)\n\n\[email protected]('/logout', methods=['GET', 'POST'])\ndef logout():\n    session.clear()\n    return redirect(url_for('login'))\n\n\ndef allowed_file(filename):\n    return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\[email protected]('/teacher', methods=['GET'])\ndef teacher_dashboard():\n if session.get('role') == 'teacher':\n username = session.get('username')\n teacher = Teacher.query.filter_by(username=username).first()\n courses = teacher.courses_taught\n return render_template('teacher_dashboard.html', courses=courses)\n else:\n return abort(403)\n\n\[email protected]('/teacher/course/<course_name>', methods=['GET', 'POST'])\ndef get_course(course_name):\n if session.get('role') == 'teacher':\n username = session.get('username')\n teacher = Teacher.query.filter_by(username=username).first()\n course = Course.query.filter_by(teacher=teacher).filter_by(course_name=course_name).first()\n if course:\n classes = course.classes\n return render_template('course_timings.html', course=course, classes=classes)\n else:\n return abort(403)\n else:\n return abort(403)\n\n\[email protected]('/teacher/course/<course_name>/<class_date>', methods=['GET', 'POST'])\ndef get_class(course_name, class_date):\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(os.getcwd() + '/' + app.config['UPLOAD_FOLDER'], filename))\n people_found = give_match(os.path.join(os.getcwd() + '/' + app.config['UPLOAD_FOLDER'], filename))\n username = session.get('username')\n teacher = Teacher.query.filter_by(username=username).first()\n course = Course.query.filter_by(teacher=teacher).filter_by(course_name=course_name).first()\n the_class = Class.query.filter_by(course=course).filter_by(date=class_date).first()\n for people in people_found:\n people = people.strip()\n print(people)\n student = Student.query.filter_by(full_name=people).first()\n if student:\n attendance = Attendance(course.id, the_class.id, student.id, True)\n db.session.add(attendance)\n db.session.commit()\n else:\n print('Student not detected')\n return redirect(request.url)\n else:\n if session.get('role') == 'teacher':\n username = session.get('username')\n teacher = Teacher.query.filter_by(username=username).first()\n course = Course.query.filter_by(teacher=teacher).filter_by(course_name=course_name).first()\n if course:\n the_class = Class.query.filter_by(course=course).filter_by(date=class_date).first()\n attendance = the_class.attendance\n return render_template('class.html', course=course, the_class=the_class, attendances=attendance)\n else:\n return abort(403)\n else:\n return abort(403)\n" }, { "alpha_fraction": 0.7956989407539368, "alphanum_fraction": 0.7956989407539368, "avg_line_length": 40.33333206176758, "blob_id": "34f671c5b180116b0094aef1935bc5b6c10b8e13", "content_id": "5dc4e660e04d7a7663c1598ae9c92a3694974485", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/admin.py", "repo_name": "anshuman73/facematch", "src_encoding": "UTF-8", "text": "from app import admin, db\nfrom flask_admin.contrib.sqla import ModelView\nfrom models import Student, Teacher, Course, Class, Attendance\n\nadmin.add_view(ModelView(Student, 
db.session))\nadmin.add_view(ModelView(Teacher, db.session))\nadmin.add_view(ModelView(Course, db.session))\nadmin.add_view(ModelView(Class, db.session))\nadmin.add_view(ModelView(Attendance, db.session))\n" }, { "alpha_fraction": 0.6729559898376465, "alphanum_fraction": 0.6823899149894714, "avg_line_length": 33.07143020629883, "blob_id": "53d0ac9bf7371c869d118e21664ef6e2d3979e76", "content_id": "fd86e42fedb6dc61547b4700883e935c88b432f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 954, "license_type": "no_license", "max_line_length": 120, "num_lines": 28, "path": "/face_match.py", "repo_name": "anshuman73/facematch", "src_encoding": "UTF-8", "text": "import face_recognition\nimport numpy as np\nimport os\n\nknown_faces = []\nknown_faces_names = []\n\nworking_dir = os.getcwd() + '/' + 'Faces'\n\nfor file in os.listdir(working_dir):\n    known_faces.append(face_recognition.face_encodings(face_recognition.load_image_file(working_dir + '/' + file))[0])\n    known_faces_names.append(file.rsplit('.', 1)[0])\n\n\ndef give_match(file_path):\n    unknown_faces = face_recognition.face_encodings(face_recognition.load_image_file(file_path))\n    people_found = []\n    for face in unknown_faces:\n        face_distances = face_recognition.face_distance(known_faces, face)\n        # Keep the scores numeric: formatting them as strings first made max()\n        # compare lexicographically ('9.00' > '10.00'), picking the wrong match\n        similarities = [(1 - x) * 100 for x in face_distances]\n        max_index = int(np.argmax(similarities))\n        people_found.append(known_faces_names[max_index])\n\n    return people_found\n" }, { "alpha_fraction": 0.6612426042556763, "alphanum_fraction": 0.6612426042556763, "avg_line_length": 37.26415252685547, "blob_id": "ab658576a34d7b01a14bd293681ebea6a59030ea", "content_id": "b8753416890e63584264a5bd1f0c50fa75d801bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2028, "license_type": "no_license", "max_line_length": 101, "num_lines": 53, "path": "/models.py", "repo_name": "anshuman73/facematch", "src_encoding": "UTF-8", "text": "from app import db\n\n\nclass User(object):\n    id = 
db.Column(db.Integer, primary_key=True)\n course_id = db.Column(db.Integer, db.ForeignKey('course.id'), nullable=False)\n class_id = db.Column(db.Integer, db.ForeignKey('class.id'), nullable=False)\n student_id = db.Column(db.Integer, db.ForeignKey('student.id'), nullable=False)\n attended = db.Column(db.Boolean, default=False)\n\n def __init__(self, course_id, class_id, student_id, attended=True):\n self.class_id = class_id\n self.course_id = course_id\n self.student_id = student_id\n self.attended = attended\n" } ]
6
BrendanWilby/py-gameoflife
https://github.com/BrendanWilby/py-gameoflife
eccc985ff2954df08189cb0ee48e0b309e7fa394
b14dbe735191f617fc1b0335469d842a40b54f83
9e2f1dfc1250bd279de739739f6bde46618e4f52
refs/heads/master
2020-07-08T04:23:08.719609
2019-08-21T10:45:36
2019-08-21T10:45:36
203,563,555
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 39, "blob_id": "205a71b54cd49fdddda4ea89b9e0c10be065c0a6", "content_id": "5eb5676a309290ed1da0127fb98f1881295d893f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/README.md", "repo_name": "BrendanWilby/py-gameoflife", "src_encoding": "UTF-8", "text": "Python version of Conway's Game of Life\n" }, { "alpha_fraction": 0.455698698759079, "alphanum_fraction": 0.4703666865825653, "avg_line_length": 30.14649772644043, "blob_id": "33d33690f2d42393588ed735fb575f13ca1284b2", "content_id": "5b16bde2418689a5cc7c09ab8ec50545408ee9dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5045, "license_type": "no_license", "max_line_length": 115, "num_lines": 157, "path": "/gameoflife.py", "repo_name": "BrendanWilby/py-gameoflife", "src_encoding": "UTF-8", "text": "import sys\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\nclass GameOfLife():\r\n def __init__(self):\r\n self.cells = [[]]\r\n self.grid_size = 0\r\n self.generations = 0\r\n self.alive = 0\r\n self.dead = 0\r\n\r\n self.game_start()\r\n \r\n def game_start(self):\r\n print(\"==================\")\r\n print(\"GAME OF LIFE\")\r\n print(\"==================\")\r\n print(\"1. Generate Randomly\")\r\n\r\n choice = self.validate_input(\"Choice: \", 1, 1)\r\n\r\n if choice == 1:\r\n self.grid_size = self.validate_input(\"Please enter the grid size: \", 1, 20)\r\n self.init_cells()\r\n self.draw_cells()\r\n\r\n self.game_loop()\r\n\r\n def game_loop(self):\r\n while 1:\r\n if self.yes_no_input(\"Advance to Next Generation? 
(y/n, yes/no, quit): \"):\r\n self.advance_gen()\r\n self.draw_cells()\r\n\r\n if self.alive == 0:\r\n break\r\n else:\r\n break\r\n \r\n print(\"Finished Simulation after %d generations.\" %self.generations)\r\n\r\n def advance_gen(self):\r\n next_gen = [cells_row[:] for cells_row in self.cells]\r\n\r\n for i in range(0, self.grid_size):\r\n for j in range(0, self.grid_size):\r\n neighbours = self.count_neighbours(i, j)\r\n\r\n if self.cells[i][j] == 0:\r\n if neighbours == 3:\r\n next_gen[i][j] = 1\r\n elif self.cells[i][j] == 1:\r\n if neighbours < 2 or neighbours > 3:\r\n next_gen[i][j] = 0\r\n self.cells = next_gen\r\n self.generations += 1\r\n self.alive = self.count_alive()\r\n self.dead = (self.grid_size * self.grid_size) - self.alive\r\n\r\n def draw_cells(self):\r\n print(\"===================\")\r\n print(\"GENERATION %d\" %self.generations)\r\n print(\"===================\")\r\n\r\n #plt.imshow(self.cells, cmap=\"gray\")\r\n #plt.title(\"Generation %d\" %self.generations)\r\n #plt.show(block=False)\r\n\r\n for row in self.cells:\r\n row_vals = [str(i) for i in row]\r\n line = \"\".join(row_vals)\r\n print(line)\r\n \r\n print(\"Alive: %d\" %self.alive)\r\n print(\"Dead: %d\" %self.dead)\r\n\r\n def validate_input(self, message, min_value, max_value):\r\n output = 0\r\n\r\n while 1:\r\n output = input(message)\r\n\r\n try:\r\n check = int(output)\r\n\r\n if check < min_value or check > max_value:\r\n print(\"Input outside of allowed range!\")\r\n else:\r\n break\r\n except ValueError:\r\n print(\"Input must be an integer number!\")\r\n return int(output)\r\n\r\n def yes_no_input(self, message):\r\n output = False\r\n\r\n while 1:\r\n in_value = input(message)\r\n\r\n if in_value == \"y\" or in_value == \"yes\":\r\n output = True\r\n break\r\n elif in_value == \"n\" or in_value == \"no\" or in_value == \"quit\":\r\n output = False\r\n break\r\n else:\r\n print(\"Invalid input. Try again.\")\r\n return output\r\n\r\n def init_cells(self):\r\n self.cells = [[random.randint(0, 1) for i in range(0, self.grid_size)] for j in range(0, self.grid_size)]\r\n print(\"Created grid of size %d x %d\" %(self.grid_size, self.grid_size))\r\n\r\n self.alive = self.count_alive()\r\n self.dead = (self.grid_size * self.grid_size) - self.alive\r\n\r\n def count_neighbours(self, cell_x, cell_y):\r\n neighbours = 0\r\n\r\n #north\r\n if cell_y > 0 and self.cells[cell_x][cell_y - 1] == 1:\r\n neighbours += 1\r\n #east\r\n if cell_x < self.grid_size - 1 and self.cells[cell_x + 1][cell_y] == 1:\r\n neighbours += 1\r\n #south\r\n if cell_y < self.grid_size - 1 and self.cells[cell_x][cell_y + 1] == 1:\r\n neighbours += 1\r\n #west\r\n if cell_x > 0 and self.cells[cell_x - 1][cell_y] == 1:\r\n neighbours += 1\r\n #north east\r\n if cell_x < self.grid_size - 1 and cell_y > 0 and self.cells[cell_x + 1][cell_y - 1] == 1:\r\n neighbours += 1\r\n #south east\r\n if cell_x < self.grid_size - 1 and cell_y < self.grid_size - 1 and self.cells[cell_x + 1][cell_y + 1] == 1:\r\n neighbours += 1\r\n #south west\r\n if cell_x > 0 and cell_y < self.grid_size - 1 and self.cells[cell_x - 1][cell_y + 1] == 1:\r\n neighbours += 1\r\n #north west\r\n if cell_x > 0 and cell_y > 0 and self.cells[cell_x - 1][cell_y - 1] == 1:\r\n neighbours += 1\r\n\r\n return neighbours\r\n\r\n def count_alive(self):\r\n alive = 0\r\n\r\n for i in range(0, self.grid_size):\r\n for j in range(0, self.grid_size):\r\n alive += self.cells[i][j]\r\n return alive\r\n\r\nif __name__ == \"__main__\":\r\n gol = GameOfLife()" } ]
2
Navak94/PystockMonthlyPLot
https://github.com/Navak94/PystockMonthlyPLot
1b19bd6ab0bf5bcad9c7f7b6f4bdc95f736d20bf
d060a68c6de19e311885fccc63e408e16a3d98d2
4cd72d95a42787631d972299b8e8ad8d9590d90b
refs/heads/master
2020-12-11T22:16:24.364488
2020-01-15T01:39:39
2020-01-15T01:39:39
233,972,489
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.778761088848114, "alphanum_fraction": 0.778761088848114, "avg_line_length": 36.66666793823242, "blob_id": "6b69cbc3a32cd1421e7080c4f47675f25d13b536", "content_id": "9231728a189f2a778605f81a8350963732f29ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 113, "license_type": "no_license", "max_line_length": 59, "num_lines": 3, "path": "/README.md", "repo_name": "Navak94/PystockMonthlyPLot", "src_encoding": "UTF-8", "text": "I do not own, nor did I create the modules imported\n\nThis is a working demo that gets stock plots month by month\n" }, { "alpha_fraction": 0.48805731534957886, "alphanum_fraction": 0.5278662443161011, "avg_line_length": 39.51612854003906, "blob_id": "50b88c13092876833c6a197c187a4acafe6967e5", "content_id": "6ea9b40b33961639c74f7f71aaf10ee026f2ef8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1256, "license_type": "no_license", "max_line_length": 96, "num_lines": 31, "path": "/PystocksMONTHLY.py", "repo_name": "Navak94/PystockMonthlyPLot", "src_encoding": "UTF-8", "text": "import yfinance as yf\nimport datetime\nimport matplotlib.pyplot as plt\nimport os\nimport getpass\nCompany= \"GE\"\ntry:\n for y in range(1,21): # loop through decade 2001-2020\n for x in range(1,13): # loop through months 1-12\n plt.clf \n if x<12:\n xdate = datetime.datetime(2000+y, x, 1)\n start = str(xdate.strftime(\"%Y-%m-%d\"))\n ydate = datetime.datetime(2000+y, x+1, 1)\n finish = str(ydate.strftime(\"%Y-%m-%d\"))\n if x==12:\n xdate = datetime.datetime(2000+y, 12, 1)\n start = str(xdate.strftime(\"%Y-%m-%d\"))\n ydate = datetime.datetime(2000+y+1, 1, 1)\n finish = str(ydate.strftime(\"%Y-%m-%d\")) \n data = yf.download(Company,start,finish)\n data.Close.plot()\n xis = str(x)\n yis = str(2000+y)\n directory = \"C:\\\\Users\\\\\"+getpass.getuser()+\"\\\\Desktop\\\\pyStockPlots\\\\\"+Company+\"\\\\\"\n if not os.path.exists(directory): # if folder does not exist create it\n os.makedirs(directory)\n plt.savefig(directory+ \" \"+yis+\" \"+xis+\" \"+Company+'.png')\n plt.close()\nexcept Exception as e:\n print(\"Date not valid\")\n" } ]
2
Kacpro/Semestr_4
https://github.com/Kacpro/Semestr_4
013a3e997613a69f815f36fb8873c818e3900e57
33ce578d69a255c09d13e0d7dad18fdc3ca6ea52
70bcf07540571573b2dc4133b4e8a6e70969c26e
refs/heads/master
2021-01-24T02:52:46.464499
2018-06-13T18:36:42
2018-06-13T18:36:42
122,864,777
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7132866978645325, "alphanum_fraction": 0.7132866978645325, "avg_line_length": 19.428571701049805, "blob_id": "e6d8cafb7a35165e415fd6502fb7243317e3fd9e", "content_id": "0ad48ed63d42272e4eebcce5f5f97ae6e012c6b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 143, "license_type": "permissive", "max_line_length": 44, "num_lines": 7, "path": "/SysOpy/lab7/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: barber.c client.c\n\tgcc -Wall barber.c -o barber -lpthread -lrt\n\tgcc -Wall client.c -o client -lpthread -lrt\n\nclean:\n\trm barber\n\trm client\n" }, { "alpha_fraction": 0.5147286653518677, "alphanum_fraction": 0.539534866809845, "avg_line_length": 19.566667556762695, "blob_id": "debb5dbd269cdefa21a9a94af59ed75114a67c4c", "content_id": "7cdafb805bb81aae2715c4ac13f21b7681e48c37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 648, "license_type": "permissive", "max_line_length": 56, "num_lines": 30, "path": "/Python/lab1/src/equation.py", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "import math\r\n#import math as m\r\n#from math import *\r\n\r\n# w konsoli: dir(math) - lista funkcji w bibliotece math\r\n# math.sqrt.__doc__ - dokumentacja funkcji\r\n\r\ndef main():\r\n\ta = float(input(\"a = \"))\r\n\tb = float(input(\"b = \"))\r\n\tc = float(input(\"c = \"))\r\n\r\n\tdelta = b*b - 4*a*c\r\n\tif delta > 0 : \r\n\t\tx1 = (-b-math.sqrt(delta))/(4*a)\r\n\t\tx2 = (-b+math.sqrt(delta))/(4*a)\r\n\t\tprint(\"x1=\",x1, \"x2=\", x2)\r\n\telif delta == 0:\r\n\t\tx = -b/(4*a)\r\n\t#\tprint(\"x=\", x)\r\n\t#\tprint(\"x = %0.4f\"%x) jak więcej zmiennych to w krotce\r\n\t\tprint(\"x ={:0.4f}\".format(x))\r\n\telse:\r\n\t\tprint(\"Brak rozwiązań\")\r\n\tprint(\"Do widzenia\")\r\n\r\n#if __main__ == \"main\":\r\n#\tmain()\r\n\r\nmain()" }, { "alpha_fraction": 0.6725663542747498, "alphanum_fraction": 0.6725663542747498, "avg_line_length": 15.142857551574707, "blob_id": "c0e3ba67411b5a9da3f004feb0fe005acd565c04", "content_id": "1a59a4256d691b470e6533f87fa48af011c4bad9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 113, "license_type": "permissive", "max_line_length": 31, "num_lines": 7, "path": "/SysOpy/lab5/src/zad2/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: master.c slave.c\n\tgcc -Wall -o master ./master.c\n\tgcc -Wall -o slave ./slave.c\n\nclean:\n\trm master\n\trm slave\n" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 7.5, "blob_id": "9a4be96f502322f810afbe7ca42bd59a805caf6c", "content_id": "6c5cc2de769df2eb53a590992db42046bf1a29e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 51, "license_type": "permissive", "max_line_length": 19, "num_lines": 6, "path": "/SysOpy/lab5/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: zad1.c\n\tgcc -Wall ./zad1.c\n\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.7237569093704224, "alphanum_fraction": 0.7237569093704224, "avg_line_length": 19.11111068725586, "blob_id": "fdd8eda7b263d9c1e675189aab4f0e22ad753488", "content_id": "08d7bca4ca068327a3cf281070e2a0a9f2c8b369", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", 
"length_bytes": 181, "license_type": "permissive", "max_line_length": 35, "num_lines": 9, "path": "/SysOpy/lab6/src/zad2/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: client.c server.c properties.h\n\tgcc -Wall client.c -o client -lrt\n\tgcc -Wall server.c -o server -lrt\n\tgcc -Wall properties.h\n\nclean:\n\trm client\n\trm server\n\trm properties.h.gch\n" }, { "alpha_fraction": 0.44296711683273315, "alphanum_fraction": 0.4520643949508667, "avg_line_length": 17.558441162109375, "blob_id": "f457a09584d3bfb075033fea3008569853838114", "content_id": "a42540c0c5b125149f93071efd7909537641f478", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 97, "num_lines": 77, "path": "/SysOpy/lab4/src/zad1/zad1.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <bits/types/time_t.h>\n#include <time.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <signal.h>\n#include <sys/wait.h>\n\nint STOPPED = 0;\npid_t CHILD;\n\n\nvoid signalHandling(int sig)\n{\n switch (sig)\n {\n case SIGTSTP:\n {\n if (!STOPPED)\n {\n STOPPED = 1;\n printf(\"\\nOczekuję na CTRL+Z - kontynuacja albo CTR+C - zakonczenie programu\\n\");\n kill(CHILD, SIGKILL);\n }\n else\n {\n STOPPED = 0;\n CHILD = fork();\n\t\tif (CHILD < 0)\n\t\t{\n\t\t\tprintf(\"Fork error\");\n\t\t\texit(-1);\n\t\t}\n\t\telse if (CHILD == 0) execl(\"./script\", \"script\", NULL);\n }\n break;\n }\n\n case SIGINT:\n {\n printf(\"\\nOdebrano sygnał SIGINT\\n\");\n kill(CHILD, SIGKILL);\n raise(SIGKILL);\n break;\n }\n\n default:\n {\n exit(-1);\n }\n }\n}\n\n\nint main()\n{\n if ((CHILD = fork()) < 0)\n {\n printf(\"Fork error\");\n exit(-1);\n }\n else if (CHILD > 0)\n {\n signal(SIGTSTP, signalHandling);\n\n struct sigaction act;\n act.sa_handler = signalHandling;\n sigemptyset(&act.sa_mask);\n act.sa_flags = 0;\n sigaction(SIGINT, &act, NULL);\n\twhile(1);\n }\n else if (CHILD == 0)\n {\n \texecl(\"./script\", \"script\", NULL);\n }\n}\n" }, { "alpha_fraction": 0.432783842086792, "alphanum_fraction": 0.45066171884536743, "avg_line_length": 25.106060028076172, "blob_id": "949909e680f00cdebe7282572e25ca94baa05a32", "content_id": "10e3135f5bf046363561f9cd34d3c3f7053b1eef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8614, "license_type": "permissive", "max_line_length": 126, "num_lines": 330, "path": "/SysOpy/lab10/src/zad2/server.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <sys/un.h>\n#include <netinet/in.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <time.h>\n#include <sys/epoll.h>\n#include <pthread.h>\n#include <signal.h>\n#include <poll.h>\n#include <errno.h>\n#include <setjmp.h>\n#include <stddef.h>\n\n\nstruct node\n{\n char* name;\n struct sockaddr addr;\n int num;\n int type;\n};\n\n\nint numOfClients = 0;\nint maxEvenets = 100;\nint clusterSize = 20;\nint socPoll;\nint becauseIDontKnowHowToDoItDifferently = 0;\n\n\nstruct node* clients;\n\n\nint initLocal(char* path)\n{\n int listenfd = 0;\n struct sockaddr_un serv_addr;\n\n listenfd = socket(AF_UNIX, SOCK_DGRAM, 0);\n\n struct timeval tv;\n tv.tv_sec = 1;\n tv.tv_usec = 0;\n setsockopt(listenfd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&tv, sizeof tv);\n\n serv_addr.sun_family = AF_UNIX;\n strcpy(serv_addr.sun_path, path);\n\n// size_t size = (offsetof 
(struct sockaddr_un, sun_path)\n// + strlen (serv_addr.sun_path));\n\n// unlink(path);\n bind(listenfd, (struct sockaddr*)&serv_addr, sizeof(struct sockaddr));\n\n return listenfd;\n}\n\n\nint initNet(int port)\n{\n int listenfd = 0;\n struct sockaddr_in serv_addr;\n\n listenfd = socket(AF_INET, SOCK_DGRAM, 0);\n int option = 1;\n setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &option, sizeof(option));\n\n struct timeval tv;\n tv.tv_sec = 1;\n tv.tv_usec = 0;\n setsockopt(listenfd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&tv, sizeof tv);\n\n serv_addr.sin_family = AF_INET;\n serv_addr.sin_addr.s_addr = htonl(INADDR_ANY);\n serv_addr.sin_port = htons(5000);\n\n bind(listenfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));\n\n return listenfd;\n}\n\nstatic int netFd;\nstatic int localFd;\n\nstruct soc\n{\n int port;\n char* path;\n};\n\n\nvoid* monitor(void* arg) {\n\n struct soc soc = *(struct soc*)arg;\n socPoll = epoll_create1(0);\n\n struct epoll_event event;\n\n netFd = initNet(soc.port);\n event.data.fd = netFd;\n event.events = EPOLLIN | EPOLLET;\n epoll_ctl(socPoll, EPOLL_CTL_ADD, netFd, &event);\n\n// perror(\"net\");\n\n localFd = initLocal(soc.path);\n// perror(\"init\");\n\n event.data.fd = localFd;\n event.events = EPOLLIN | EPOLLET;\n epoll_ctl(socPoll, EPOLL_CTL_ADD, localFd, &event);\n\n// perror(\"local\");\n\n struct epoll_event *events = calloc(maxEvenets, sizeof(struct epoll_event));\n\n char readBuff[128];\n\n while (1)\n {\n // perror(\"while\");\n int n = epoll_wait(socPoll, events, maxEvenets, -1);\n for (int i = 0; i < n; i++)\n {\n for (int j=0; j<128; j++) readBuff[j] = '\\0';\n\n if (events[i].data.fd == netFd || events[i].data.fd == localFd)\n {\n// int clientFD = accept(events[i].data.fd, (struct sockaddr *) NULL, 0);\n// if (clientFD == -1) continue;\n\n struct sockaddr addr;\n socklen_t addrSize = sizeof(struct sockaddr);\n// perror(\"a\");\n recvfrom(events[i].data.fd, readBuff, 128, MSG_PEEK, &addr, &addrSize);\n if (readBuff[0] != '2')\n {\n recvfrom(events[i].data.fd, readBuff, 128, 0, &addr, &addrSize);\n }\n else\n {\n continue;\n }\n\n if (readBuff[0] == '1')\n {\n char *read2 = calloc(128, sizeof(char));\n strcpy(read2, readBuff);\n\n char *name = strtok(read2 + sizeof(char), \"\\0\");\n\n int flag = 1;\n for (int j = 0; j < clusterSize; j++)\n {\n if (!strcmp(clients[j].name, name))\n {\n flag = 0;\n }\n }\n\n if (flag == 1)\n {\n for (int j=0; j<clusterSize; j++)\n {\n if (clients[j].num == -1)\n {\n clients[j].name = name;\n clients[j].addr = addr;\n clients[j].type = events[i].data.fd == localFd ? 
1 : 0;\n clients[j].num = numOfClients;\n break;\n }\n }\n printf(\"Node up: %d (%s)\\n\", numOfClients, name);\n numOfClients++;\n\n sendto(events[i].data.fd, \"1Y\", 2, 0, &addr, addrSize);\n// perror(\"sendto\");\n }\n else\n {\n sendto(events[i].data.fd, \"1N\", 2, 0, &addr, addrSize);\n }\n }\n else if (readBuff[0] == '3')\n {\n printf(\"Result: %s, Node: %d\\n\", strtok(readBuff + 2 * sizeof(char), \"\\0\"), (int)(readBuff[1]) - 1);\n }\n else if (readBuff[0] == '2')\n {\n// printf(\">>pong\\n\");\n int n = sendto(events[i].data.fd, \"2\", 1, 0, &addr, addrSize);\n// printf(\"%d\\n\", n);\n// perror(\"sent\");\n }\n else\n {\n // printf(\"Unknown message\\n\");\n }\n\n\n }\n else\n {\n// perror(\"else\");\n recv(events[i].data.fd, readBuff, 128, 0);\n if (readBuff[0] == '3')\n {\n printf(\"Result: %s, Node: %d\\n\", strtok(readBuff + 2 * sizeof(char), \"\\0\"), (int)(readBuff[1]) - 1);\n }\n }\n }\n\n\n usleep(1000);\n }\n}\n\n\nvoid* ping(void* arg)\n{\n while (1)\n {\n sleep(1);\n for (int i = 0; i < clusterSize; i++)\n {\n if (clients[i].num != -1)\n {\n ssize_t res = sendto(clients[i].type?localFd:netFd, \"2\", 1, 0, &clients[i].addr, sizeof(struct sockaddr));\n\n char *buf = calloc(128, sizeof(char));\n socklen_t size = sizeof(clients[i].addr);\n // printf(\"%ld\\n\", res);\n// perror(\"send_ping\");\n recvfrom(clients[i].type?localFd:netFd, buf, 128,0 , &clients[i].addr, &size);\n// perror(\"recv\");\n// printf(\"%ld\\n\", res);\n if (errno == EAGAIN)\n {\n printf(\"Node down: %d\\n\", clients[i].num);\n clients[i].num = -1;\n clients[i].name = \"\";\n clients[i].type = -1;\n errno = 0;\n }\n if (buf[0] != '2')\n {\n char* buf2 = calloc(130, sizeof(char));\n strcpy(buf2, \"5\");\n strcat(buf2, buf);\n sendto(clients[i].type?localFd:netFd, buf2, 130, 0, &clients[i].addr, sizeof(struct sockaddr));\n }\n }\n\n }\n }\n}\n\n\n\n\nvoid* calc(void* a)\n{\n srand(time(0));\n char* buf = calloc(128, sizeof(char));\n size_t size = 127;\n while(1)\n {\n char* msg = calloc(130, sizeof(char));\n strcat(msg, \"3\");\n getline(&buf, &size, stdin);\n\n int p;\n while (clients[p = rand()%clusterSize].num == -1);\n\n char pos[] = {(char)(p + 1), '\\0'};\n strcat(msg, pos);\n strcat(msg, buf);\n\n sendto(netFd, msg, 130, 0, &clients[p].addr, sizeof(clients[p].addr));\n perror(\"send calc\");\n free(msg);\n usleep(1000);\n }\n}\n\n\nvoid signalHandling(int sig)\n{\n for (int i=0; i<clusterSize; i++)\n {\n // close(clients[i].fd);\n }\n free(clients);\n printf(\"\\n\");\n exit(0);\n}\n\n\nint main(int argc, char** argv)\n{\n clients = calloc(clusterSize, sizeof(struct node));\n for (int i=0; i<clusterSize; i++)\n {\n clients[i].name = \"\";\n clients[i].num = -1;\n }\n\n signal(SIGINT, signalHandling);\n\n pthread_t watcher, pinger, sender;\n\n struct soc* s = calloc(1, sizeof(struct soc));\n s->path = argv[2];\n s->port = atoi(argv[1]);\n\n pthread_create(&watcher, NULL, monitor, s);\n// pthread_create(&pinger, NULL, ping, s);\n pthread_create(&sender, NULL, calc, NULL);\n\n\n\n pthread_join(watcher, NULL);\n // pthread_join(pinger, NULL);\n pthread_join(sender, NULL);\n\n return 0;\n}" }, { "alpha_fraction": 0.4771241843700409, "alphanum_fraction": 0.5119825601577759, "avg_line_length": 16.69230842590332, "blob_id": "89d40b7c726305aadec8a3076340eb95d22b54c1", "content_id": "9f5c20fa2c7642bc650e2231f0b5068ca641b1d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 459, "license_type": "permissive", "max_line_length": 64, "num_lines": 26, 
"path": "/SysOpy/lab8/src/zad1/generator.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <time.h>\n\nint main(int argc, char** argv)\n{\n\n if (argc != 3) return -1;\n char* fileName = argv[1];\n int size = atoi(argv[2]);\n\n srand(time(0));\n\n FILE* file = fopen(fileName, \"w\");\n\n fprintf(file, \"%d\\n\", size);\n\n for(int i=0; i<size*size; i++)\n {\n fprintf(file, \"%lf \", rand()%100/(100.0 * size * size));\n if ((i+1)%9 == 0) fprintf(file, \"\\n\");\n }\n\n fclose(file);\n\n}" }, { "alpha_fraction": 0.5171849131584167, "alphanum_fraction": 0.5417348742485046, "avg_line_length": 14.666666984558105, "blob_id": "e3d8ad932f661bb5d948690c52f77dbe78df0303", "content_id": "f1e93b7dc0054b0db525a81ae227d9fb5a6e0fc1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 611, "license_type": "permissive", "max_line_length": 49, "num_lines": 39, "path": "/SysOpy/lab5/src/zad2/master.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <sys/stat.h>\n\n\nvoid masterLogic(char* fifoName)\n{\n mkfifo(fifoName, 0644);\n size_t bufSize = 255;\n char* buffer = calloc(bufSize, sizeof(char));\n\n FILE* file;\n\n while (1)\n {\n\tfile = fopen(fifoName, \"r\");\n if (getline(&buffer, &bufSize, file) > 0)\n printf(\"%s\", buffer);\n\tfclose(file);\n }\n}\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 2) return -1;\n masterLogic(argv[1]);\n return 0;\n}\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0)\n {\n printf(\"Error\");\n exit(-1);\n }\n}\n" }, { "alpha_fraction": 0.45137614011764526, "alphanum_fraction": 0.4688073396682739, "avg_line_length": 21.831579208374023, "blob_id": "0b329e05597805d5aa3e9a311d272b55a570e8ef", "content_id": "fd2376785c07c088be2d67e637a6fb00bf6a37bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2180, "license_type": "permissive", "max_line_length": 73, "num_lines": 95, "path": "/Python/lab2/src/zad1.py", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "def foo(s):\n a = []\n for letter in s:\n if (letter.isalpha()):\n a.append(letter);\n return sorted(set(a))\n\n\nop = '&>|'\n\n#var = 'abcdefghijklmnopqrstuvwxyz'\n\ndef check(exp):\n ln = 0\n state = True\n for z in exp:\n if z == '(': ln = ln + 1\n if z == ')': ln = ln - 1\n if ln < 0: return False\n if state == True:\n if z in foo(exp): state = False\n elif z in ')'+op: return False\n else:\n if z in op: state = True\n elif z in '('+ foo(exp) : return False\n if ln != 0: return False\n return not state \n \n\ndef bal(w, op):\n ln = 0\n for i in range(len(w)-1, 0, -1):\n if w[i] == '(': ln += 1\n if w[i] == ')': ln -= 1\n if w[i] in op and ln == 0: return i\n return -1\n\n\ndef onp(w):\n while w[0] == '(' and w[-1] == ')' and check(w[1:-1]):\n w = w[1:-1]\n p = bal(w, '>')\n if p>=0:\n return onp(w[:p]) + onp(w[p+1:]) + w[p]\n p = bal(w, '&|')\n if p>=0:\n return onp(w[:p]) + onp(w[p+1:]) + w[p]\n return w\n\n\ndef mapuj(wyr, zm, val):\n l = list(wyr)\n for i in range(len(l)):\n if zm.count(wyr[i]) > 0: \n p = zm.index(wyr[i])\n l[i] = val[p]\n return ''.join(l)\n\n\ndef value(wyr, val):\n zm = foo(wyr) #var\n wyr = mapuj(wyr, zm, val)\n st = []\n for z in wyr:\n if z in '01': st.append(int(z))\n elif z == '&': st.append(st.pop() and st.pop())\n elif z == '|': st.append(st.pop() or st.pop())\n elif z == '>': 
st.append(st.pop() or 1-st.pop())\n return st.pop()\n\n\ndef gen(n):\n for i in range(2**n):\n yield bin(i)[2:].rjust(n,'0')\n\n\ndef evaluate(expr):\n\tfor val in gen(len(foo(expr))):\n \t\tprint(val[:len(foo(expr))] + ' ' + str(value(onp(expr), val)))\n\n\ndef isTautology(expr):\n for val in gen(len(foo(expr))):\n if value(onp(expr), val) == 0: return False\n return True\n\n\ndef areTheSame(expr1, expr2):\n if foo(expr1) != foo(expr2): return False\n for val in gen(len(foo(expr1))):\n if value(onp(expr1), val) != value(onp(expr2), val): return False\n return True\n\n\nevaluate('a&b|c')\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6022526025772095, "alphanum_fraction": 0.6184509992599487, "avg_line_length": 29.041824340820312, "blob_id": "4e1e46e61696bc3d030862c603d7f44b5756cebc", "content_id": "6e925f7bcae00f31c10068c47688ca5df431e37c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7902, "license_type": "permissive", "max_line_length": 387, "num_lines": 263, "path": "/SysOpy/lab1/src/zad3a/tester_dynamic.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include<stdio.h>\n#include<stdlib.h>\n#include<time.h>\n#include<sys/times.h>\n#include<unistd.h>\n#include<dlfcn.h>\n#include<string.h>\n\nchar staticArray[8192][8192];\n\n\nvoid calculateTimes(FILE* fp, char* operation, clock_t realEnd, clock_t realStart, float sysEnd, float sysStart, float userEnd, float userStart)\n{\n \tlong clk = sysconf(_SC_CLK_TCK);\n \tlong double nseconds = (long double)((unsigned long)realEnd - (unsigned long)realStart)/(long double)CLOCKS_PER_SEC;\n\tlong double nsecondss = (long double)((unsigned long)sysEnd - (unsigned long)sysStart)/clk;\n\tlong double nsecondsu = (long double)((unsigned long)userEnd - (unsigned long)userStart)/clk;\n\tprintf(\"\\t%s:\\t\\t Real: %Lf User: %Lf System: %Lf\\n\", operation, nseconds, nsecondsu, nsecondss);\n\tfprintf(fp,\"%s:\\t\\t Real: %Lf\\t User: %Lf\\t System: %Lf\\n\", operation, nseconds, nsecondsu, nsecondss);\n}\n\nvoid fillArrayDyn(char** array, int arraySize, int blockSize)\n{\n void* handle2 = dlopen(\"./libblocklibrary.so\", RTLD_LAZY);\n\tif (!handle2) printf(\"Can not create a handle\");\n\n\tvoid (*addBlockDynamic)(char**, char*, int, int) = (void (*)(char**, char*, int, int)) dlsym(handle2, \"addBlockDynamic\");\n\tif (dlerror() != NULL) printf(\"Error with addBlockDynamic\");\n\t\n\tfor (int i=0; i< arraySize; i++)\n {\n char* block = calloc(blockSize, sizeof(char));\n for (int j=0; j<blockSize; j++)\n block[j] = rand()%('z' - 'a') + 'a';\n (*addBlockDynamic)(array, block, i, blockSize);\n }\n\n\tdlclose(handle2);\n}\n\n\n\nvoid fillArrayStat(int arraySize, int blockSize)\n{\n\tvoid* handle2 = dlopen(\"./libblocklibrary.so\", RTLD_LAZY);\n\tif (!handle2) printf(\"Can not create a handle\");\n\t\t \n\tchar (*staticArrayPtr)[8192][8192] = (char (*)[8192][8192]) dlsym(handle2, \"staticArray\");\n\tif (dlerror() != NULL) printf(\"Error with staticArray\");\n\n\tfor (int i=0; i<arraySize; i++)\n\t\tfor (int j=0; j<blockSize; j++)\n\t\t\t(*staticArrayPtr)[i][j] = rand()%('z' - 'a') + 'a';\n\t\n\tdlclose(handle2);\n}\n\n\nvoid showHelp()\n{\n\tprintf(\"\\nRequired arguments:\\n\\tarraySize\\n\\tblockSize\\n\\tallockMode (0 - static; 1 - dynamic)\\n\\tfunArg0 (createArray)\\n\\tfunArg1 (delete and insert n blocks)\\n\\tfunArg2 (findBlock)\\n\\tfunArg3 (delete and insert block n times)\\n\\r\\nPositive values in arguments funArg0 - funArg3 are parameters to the specific 
functions. Other values mean that the function should not be called\\n\\n\");\n}\n\n\nint checkArgs(int argc, char** argv)\n{\t\n\tif (argc != 8) return 1;\n\tif (atoi(argv[3]) != 1 && atoi(argv[3]) != 0) return 1;\n\tif (atoi(argv[5]) > atoi(argv[1])) return 1;\n\treturn 0;\n}\n\n\nint main(int argc, char** argv)\n{\n\t\n\t// Checking the arguments\n\tif (checkArgs(argc, argv) == 1)\n\t{\n\t\tshowHelp();\n\t\treturn 1;\n\t}\n\n\tint arraySize = atoi(argv[1]);\n\tint blockSize = atoi(argv[2]);\n\tint allockMode = atoi(argv[3]);\n\tint funArg0 = atoi(argv[4]);\n\tint funArg1 = atoi(argv[5]);\n\tint funArg2 = atoi(argv[6]);\n\tint funArg3 = atoi(argv[7]);\n\t\n\n\t// Opening the file\n\tFILE *fp;\n\tif ((fp=fopen(\"../zad2/raport2.txt\", \"w\")) == NULL) \n\t{\n\t\tprintf (\"Can not open the file\\n\");\n\t\texit(1);\n\t}\n\t\n\n\t// Creating function handlers\n\tvoid* handle = dlopen(\"./libblocklibrary.so\", RTLD_LAZY);\n\tif (!handle) printf(\"Can not create a handle\");\n\t\n\tchar (*staticArrayPtr)[8192][8192] = (char (*)[8192][8192]) dlsym(handle, \"staticArray\");\n if (dlerror() != NULL) printf(\"Error with staticArray\");\n\n\tchar** (*createArray)(int, int) = (char** (*)(int, int)) dlsym(handle, \"createArray\");\n\tif (dlerror() != NULL) printf(\"Error with createArray\");\n\n\tvoid (*deleteArrayDynamic)(char**, int) = (void (*)(char**, int)) dlsym(handle, \"deleteArrayDynamic\");\n\tif (dlerror() != NULL) printf(\"Error with deleteArrayDynamic\");\n\n\tvoid (*addBlockStatic)(char*, int, int) = (void (*)(char*, int, int)) dlsym(handle, \"addBlockStatic\");\n\tif (dlerror() != NULL) printf(\"Error with addBlockStatic\");\n\n\tvoid (*addBlockDynamic)(char**, char*, int, int) = (void (*)(char**, char*, int, int)) dlsym(handle, \"addBlockDynamic\");\n\tif (dlerror() != NULL) printf(\"Error with addBlockDynamic\");\n\n\tvoid (*deleteBlockDynamic)(char**, int) = (void (*)(char**, int)) dlsym(handle, \"deleteBlockDynamic\");\n\tif (dlerror() != NULL) printf(\"Error with deleteBlockDynamic\");\n\n\tint (*sumChar)(char*, int) = (int (*)(char*, int)) dlsym(handle, \"sumChar\");\n\tif (dlerror() != NULL) printf(\"Error with sumChar\");\n\n\tchar* (*findBlockDyn)(char**, int, int, int) = (char* (*)(char**, int, int, int)) dlsym(handle, \"findBlockDyn\");\n\tif (dlerror() != NULL) printf(\"Error with findBlockDyn\");\n\n\tchar* (*findBlockStat)(int, int, int) = (char* (*)(int, int, int)) dlsym(handle, \"findBlockStat\");\n\tif (dlerror() != NULL) printf(\"Error with findBlockStat\");\n\t\n\t\n\t// Time structures\n\tstruct tms startSys, endSys;\n\ttime_t realStart, realEnd;\n\n\tsrand(time(0));\n\tchar** array;\n\n\n\t// Creating an array\n\tif (allockMode == 1)\n\t{\t\n\t\tif (funArg0 > 0)\n\t\t{\n\t\t\trealStart = clock();\n\t\t\ttimes(&startSys);\n\t\t\t\tarray = createArray(arraySize, blockSize);\n\t\t\t\tfillArrayDyn(array, arraySize, blockSize);\n\t\t\trealEnd = clock();\n\t\t\ttimes(&endSys);\n\t\t\tcalculateTimes(fp, \"Creating an array\", realEnd, realStart, endSys.tms_stime, startSys.tms_stime, endSys.tms_utime, startSys.tms_utime);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tarray = (*createArray)(arraySize, blockSize);\n fillArrayDyn(array, arraySize, blockSize);\n\t\t}\n\t}\n\telse if (allockMode == 0)\n\t{\n\t\tfor (int i=0; i<arraySize; i++)\n\t\t\tfor (int j=0; j<blockSize; j++)\n\t\t\t\t(*staticArrayPtr)[i][j] = (char)0;\n\t}\n\t\n\n\t\n\t// Deleting and inserting n blocks\n\tif (funArg1 > 0)\n\t{\n\t\tif (allockMode == 1)\n\t\t{\n\t\t\trealStart = 
clock();\n\t\t\ttimes(&startSys);\n\t\t\t\tfor (int i=0; i<funArg1; i++) \n\t\t\t\t\tfree(array[i]);\n\t\t\tfillArrayDyn(array, funArg1, blockSize);\n\t\t\trealEnd = clock();\n\t\t\ttimes(&endSys);\n\t\t}\n\t\telse if (allockMode == 0)\n\t\t{\n\t\t\trealStart = clock();\n\t\t\ttimes(&startSys);\n\t\t\t\tfor (int i=0; i<funArg1; i++)\n\t\t\t\t\tfor (int j=0; j<blockSize; j++)\n\t\t\t\t\t\t(*staticArrayPtr)[i][j] = (char)0;\n\t\t\tfillArrayStat(funArg1, blockSize);\n\t\t\trealEnd = clock();\n\t\t\ttimes(&endSys);\n\t\t}\n\t\tcalculateTimes(fp, \"Del. and ins. n elements\", realEnd, realStart, endSys.tms_stime, startSys.tms_stime, endSys.tms_utime, startSys.tms_utime);\n\t}\n\n\n\n\t// Finding a block\t\n\tif (funArg2 > 0)\n\t{\n\t\tif (allockMode == 1)\n\t\t{\n\t\t\trealStart = clock();\n\t\t\ttimes(&startSys);\n\t\t\t\t(*findBlockDyn)(array, funArg2, arraySize, blockSize);\n\t\t\trealEnd = clock();\n\t\t\ttimes(&endSys);\n\t\t}\n\t\telse if (allockMode == 0)\n\t {\n\t realStart = clock();\n\t times(&startSys);\n\t\t (*findBlockStat)(funArg2, arraySize, blockSize);\n\t\t realEnd = clock();\n\t\t times(&endSys);\n\t\t}\n\t\tcalculateTimes(fp, \"Finding a block\", realEnd, realStart, endSys.tms_stime, startSys.tms_stime, endSys.tms_utime, startSys.tms_utime);\t\t\t\t\t\t }\n\n\n\t// Deleting and inserting block n times\n\tif (funArg3 > 0)\n\t{\n\t\tif (allockMode == 1)\n\t\t{\n\t\t\trealStart = clock();\n\t\t\ttimes(&startSys);\n\t\t\t\tfor (int i=0; i<funArg3; i++)\n\t\t\t\t{\n\t\t\t\t\tfree(array[0]);\n\t\t\t\t\tchar* block = calloc(blockSize, sizeof(char));\n\t \t\tfor (int j=0; j<blockSize; j++)\n\t\t \t\tblock[j] = rand()%('z'-'a')+'a';\n\t\t\t\t\t(*addBlockDynamic)(array, block, 0, blockSize);\n\t\t\t\t}\n\t\t\trealEnd = clock();\n\t\t\ttimes(&endSys);\n\t\t}\n\t\telse if (allockMode == 0)\n\t\t{\n\t\t\trealStart = clock();\n times(&startSys);\n\t\t\t\tfor (int i=0; i<funArg3; i++)\n\t\t\t\t{\t\n\t\t\t\t\tchar block[blockSize];\n\t\t\t\t\tfor (int j=0; j<blockSize; j++)\n\t\t\t\t\t\tblock[j] = rand()%('z'-'a')+'a';\n\t\t\t\t\t(*addBlockStatic)(block, 0, blockSize);\n\t\t\t\t}\n\t\t\trealEnd = clock();\n\t\t\ttimes(&endSys);\n\t\t}\n\t\tcalculateTimes(fp, \"Del. and ins. elem. 
n times\", realEnd, realStart, endSys.tms_stime, startSys.tms_stime, endSys.tms_utime, startSys.tms_utime);\n\t}\n\t\n\tif (allockMode == 1)\n\t\t(*deleteArrayDynamic)(array, arraySize);\n\n\tprintf(\"\\n\");\n\tfclose(fp);\n\tdlclose(handle);\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.5952380895614624, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 6, "blob_id": "c7824a16d4d0ebb3e82c970cc729c379f81a8e16", "content_id": "6f033cfe7c81d5168b4a2ec1f08982d5706e48a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 42, "license_type": "permissive", "max_line_length": 17, "num_lines": 6, "path": "/SysOpy/lab3/src/zad2/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all:\n\tgcc -Wall zad2.c\n\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.66015625, "alphanum_fraction": 0.6888020634651184, "avg_line_length": 23, "blob_id": "164d69fa0135166c48c9440d0bbd48c60cf4257a", "content_id": "7c3c6e09c3189de36b06767d3e9a0597fb8c862a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 768, "license_type": "permissive", "max_line_length": 65, "num_lines": 32, "path": "/SysOpy/lab1/src/zad3a/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "ARGS = 2048 4096 1 1 1000 1 1000\nLIB_SRC = ../zad1/blocklibrary.c\nMAIN_SRC = ../zad2/tester.c\nMAIN_DYN_SRC = ../zad3a/tester_dynamic.c\n\nall: static shared dynamic\n\n\nstatic:\n\tgcc -c $(LIB_SRC)\n\tar rcs libblocklibrary.a blocklibrary.o\n\tgcc -c $(MAIN_SRC)\n\tgcc tester.o libblocklibrary.a -o tester_static\n\t./tester_static $(ARGS)\n\n\nshared:\n\tgcc -Wall -fPIC -c $(LIB_SRC)\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o\n\tgcc -Wl,-rpath=. -L. -o tester_shared $(MAIN_SRC) -lblocklibrary\n\t./tester_shared $(ARGS)\n\n\ndynamic:\n\tgcc -Wall -fPIC -c $(LIB_SRC)\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o\n\tgcc -L. 
-o tester_dynamic $(MAIN_DYN_SRC) -ldl -D DLL\n\t./tester_dynamic $(ARGS)\t\n\n\nclean: \n\trm -f *.o *.so *.a tester_dynamic tester_shared tester_static\n" }, { "alpha_fraction": 0.5685904622077942, "alphanum_fraction": 0.5884570479393005, "avg_line_length": 24.53333282470703, "blob_id": "f6979dc57418c3504f5d27ac5f6418e1d74763da", "content_id": "f730e904baee38cfb7fd5b04b398351d2ec3cbc9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6896, "license_type": "permissive", "max_line_length": 142, "num_lines": 270, "path": "/SysOpy/lab2/src/zad1/zad1.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <bits/types/FILE.h>\n#include <stdio.h>\n#include <memory.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <time.h>\n#include <sys/resource.h>\n\n\nint parse(int argc, char** argv);\n\nvoid generate(char* fileName, int numberOfRecords, int recordSize);\n\nvoid sort(char* fileName, int numberOfRecords, int recordSize, char* mode);\n\nvoid copy(char* sourceName, char* destinationName, int howManyRecords, int bufferSize, int recordSize, char* mode);\n\n\nvoid showHelp()\n{\n\tprintf(\"Possible arguments:\\n\");\n\tprintf(\"\\tgenerate <file_name> <num_of_records> <record_size>\\n\");\n\tprintf(\"\\tsort <file_name> <num_of_records> <record_size> lib|sys\\n\");\n\tprintf(\"\\tcopy <source_file_name> <destination_file_name> <num_of_records> <buffer_size> <record_size> lib|sys\\n\");\t\n}\n\n\n\nint main(int argc, char** argv)\n{\n srand(time(NULL));\n if (parse(argc, argv) == -1)\n\t showHelp();\n\n return 0;\n}\n\n\nvoid showResults(struct rusage start, struct rusage end, char* operation)\n{\n long double timeU = (end.ru_utime.tv_sec + (end.ru_utime.tv_usec * 1.0)/1e6 - start.ru_utime.tv_sec - (start.ru_utime.tv_usec * 1.0)/1e6);\n long double timeS = (end.ru_stime.tv_sec + (end.ru_stime.tv_usec * 1.0)/1e6 - start.ru_stime.tv_sec - (start.ru_stime.tv_usec * 1.0)/1e6);\n printf(\"\\nUser time:\\t%Lf\\nSystem time:\\t%Lf\\n\\n\", timeU, timeS);\n}\n\n\nint parse(int argc, char** argv)\n{\n struct rusage start, end;\n\n if (!strcmp(argv[1], \"generate\"))\n {\n if (argc != 5) return -1;\n char* fileName = argv[2];\n int numberOfRecords = atoi(argv[3]);\n int recordSize = atoi(argv[4]);\n generate(fileName, numberOfRecords, recordSize);\n return 0;\n }\n else if (!strcmp(argv[1], \"sort\"))\n {\n if (argc != 6) return -1;\n if (strcmp(argv[5], \"sys\") && strcmp(argv[5], \"lib\")) return -1;\n char* filePath = argv[2];\n int numberOfRecords = atoi(argv[3]);\n int recordSize = atoi(argv[4]);\n char* mode = argv[5];\n\n getrusage(RUSAGE_SELF, &start);\n sort(filePath, numberOfRecords, recordSize, mode);\n getrusage(RUSAGE_SELF, &end);\n showResults(start, end, \"Sort\");\n\n return 0;\n }\n else if (!strcmp(argv[1], \"copy\"))\n {\n if (argc != 8) return -1;\n if (strcmp(argv[7], \"sys\") && strcmp(argv[7], \"lib\")) return -1;\n char* sourcePath = argv[2];\n char* destinationName = argv[3];\n\tint howManyRecords = atoi(argv[4]);\n int bufferSize = atoi(argv[5]);\n\tint recordSize = atoi(argv[6]);\n char* mode = argv[7];\n\n getrusage(RUSAGE_SELF, &start);\n copy(sourcePath, destinationName, howManyRecords, bufferSize, recordSize, mode);\n getrusage(RUSAGE_SELF, &end);\n showResults(start, end, \"Copy\");\n\n return 0;\n }\n else\n {\n return -1;\n }\n}\n\n\nunsigned char* generateRandomString(int length)\n{\n unsigned char* result = calloc(length, sizeof(char));\n for (int i=0; i<length; 
i++)\n {\n result[i] = (unsigned char) (rand() % (256));\n }\n return result;\n}\n\n\nvoid generate(char* fileName, int numberOfRecords, int recordSize)\n{\n FILE* handler = fopen(fileName, \"w\");\n for (int i=0; i<numberOfRecords; i++)\n {\n unsigned char* record = generateRandomString(recordSize);\n fwrite(record, sizeof(unsigned char), recordSize, handler);\n }\n fclose(handler);\n}\n\n\n\nvoid sort(char *fileName, int numberOfRecords, int recordSize, char *mode)\n{\n unsigned char* buffer1 = calloc(recordSize, sizeof(char));\n unsigned char* buffer2 = calloc(recordSize, sizeof(char));\n\n if (!strcmp(mode, \"lib\"))\n {\n FILE* handler = fopen(fileName, \"r+\");\n\t\n\tif (handler == NULL)\n\t{\n\t\tprintf(\"File opening error\\n\");\n\t\texit(-1);\n\t} \n\n for (int i=1; i<numberOfRecords; i++)\n {\n fseek(handler, recordSize*i, 0);\n\t fread(buffer1, sizeof(unsigned char), recordSize, handler);\n\n\t int j = i-1;\n\t do\n\t {\n\t\tfseek(handler, recordSize*j, 0);\n\t\tfread(buffer2, sizeof(unsigned char), recordSize, handler);\n\t\tif (buffer1[0] < buffer2[0])\n\t\t{\n\t\t\tfseek(handler, recordSize*(j+1),0);\n\t\t\tfwrite(buffer2, sizeof(unsigned char), recordSize, handler);\n\t\t}\n\t\tj--;\n\t }\n\t while (buffer1[0] < buffer2[0] && j>=0);\n\t \n\t if (buffer1[0] >= buffer2[0]) fseek(handler, recordSize*(j+2), 0);\n\t else fseek(handler, recordSize*(j+1), 0);\n\n\t fwrite(buffer1, sizeof(unsigned char), recordSize, handler);\n }\n\n fclose(handler);\n }\n else\n {\n int handler = open(fileName, O_RDWR);\n\n\tif (handler < 0)\n\t{\n\t\tprintf(\"File opening error\\n\");\n\t\texit(-1);\n\t} \n\n for (int i=1; i<numberOfRecords; i++)\n {\n lseek(handler, recordSize * i, SEEK_SET);\n read(handler, buffer1, recordSize);\n\n\t int j = i-1;\n\t do\n\t {\n\t\tlseek(handler, recordSize*j, SEEK_SET);\n\t\tread(handler, buffer2, recordSize);\n\t\tif (buffer1[0] < buffer2[0])\n\t\t{\n\t\t lseek(handler, recordSize * (j+1), SEEK_SET);\n\t write(handler, buffer2, recordSize);\n\t\t}\n\t\tj--;\n\t }\n\t while (buffer1[0] < buffer2[0] && j>=0);\n\t \n\t if (buffer1[0] >= buffer2[0]) lseek(handler, recordSize*(j+2), SEEK_SET);\n\t else lseek(handler, recordSize*(j+1), SEEK_SET);\n\n\t write(handler, buffer1, recordSize);\t\t\n\n }\n close(handler);\n }\n\n free(buffer1);\n free(buffer2);\n}\n\n\n\nvoid copy(char *sourceName, char *destinationName, int howManyRecords, int bufferSize, int recordSize, char *mode)\n{\n if (!strcmp(mode, \"lib\"))\n {\n FILE* handler1 = fopen(sourceName, \"r\");\n FILE* handler2 = fopen(destinationName, \"w\");\n\n\tif (handler1 == NULL || handler2 == NULL)\n\t{\n\t\tprintf(\"File opening error\\n\");\n\t\texit(-1);\n\t} \n\n char* buffer = calloc(bufferSize, sizeof(char));\n\t\n\tint recNum = howManyRecords*recordSize;\n int buf;\n while((buf = fread(buffer, sizeof(char), bufferSize, handler1)) && recNum > 0)\n {\n\t if (recNum - buf > 0)\t\n \tfwrite(buffer, sizeof(char), buf, handler2);\n\t else\n\t\tfwrite(buffer, sizeof(char), recNum, handler2);\n\t recNum -= buf;\n }\n\n fclose(handler1);\n fclose(handler2);\n free(buffer);\n }\n else\n {\n int handler1 = open(sourceName, O_RDONLY);\n int handler2 = open(destinationName, O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);\n\n\tif (handler1 < 0 || handler2 < 0)\n\t{\n\t\tprintf(\"File opening error\\n\");\n\t\texit(-1);\n\t} \n\n char* buffer = calloc(bufferSize, sizeof(char));\n\n int buf;\n\tint recNum = howManyRecords*recordSize;\n while((buf=read(handler1, buffer, bufferSize*sizeof(char))) && recNum > 0)\n {\n 
if (recNum - buf > 0)\n\t \twrite(handler2, buffer, buf);\n\t else\n\t\twrite(handler2, buffer, recNum);\n\t recNum -= buf;;\n }\n\n close(handler1);\n close(handler2);\n free(buffer);\n }\n}\n\n\n" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 6.333333492279053, "blob_id": "7ef1a00d5f922fa09602e9c8aa028068eb47691c", "content_id": "ece1398bca203aa10a5cee7c36c30a6ef33bd52d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 45, "license_type": "permissive", "max_line_length": 18, "num_lines": 6, "path": "/SysOpy/lab3/src/zad3b/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: \n\tgcc -Wall zad3b.c\n\n\nclean:\n\trm a.out\n\n" }, { "alpha_fraction": 0.4990497827529907, "alphanum_fraction": 0.5110224485397339, "avg_line_length": 21.29660987854004, "blob_id": "2fe45b1f380cd019c6cd0c63117d720ce91f2706", "content_id": "65f4d5267d6ca47cba95b0459ae913ccf04ed1b7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5262, "license_type": "permissive", "max_line_length": 136, "num_lines": 236, "path": "/SysOpy/lab3/src/zad1/zad1.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#define _XOPEN_SOURCE 500\n#include<dirent.h>\n#include<string.h>\n#include<stdlib.h>\n#include<sys/stat.h>\n#include<stdio.h>\n#include <time.h>\n#include <ftw.h>\n#include <printf.h>\n#include <unistd.h>\n#include <sys/wait.h>\n\n\nvoid dirInfo_stat(char* filePath);\nint parse(int argc, char** argv);\nvoid dirInfo_nftw(char* filePath);\n\n\nchar* date;\nchar* operator;\n\n\n\n\nvoid printHelp()\n{\n printf(\"Required arguments:\\n\\t- directory path\\n\\t- operator (< > =)\\n\\t- date (yyyy-mm-dd)\\n\\t- mode (0 1)\\n\");\n}\n\n\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0)\n printHelp();\n return 0;\n}\n\n\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 5) return -1;\n if (!strcmp(argv[2], \"=\") && !strcmp(argv[2], \"<\") && !strcmp(argv[2], \">\")) return -1;\n char* filePath = argv[1];\n operator = argv[2];\n date = argv[3];\n int mode = atoi(argv[4]);\n if (mode != 1 && mode != 0) return -1;\n\n if (mode == 0) dirInfo_stat(filePath);\n else dirInfo_nftw(filePath);\n\n return 0;\n}\n\n\n\n\ntime_t stringToDate(char *date)\n{\n int year = 0, month = 0, day = 0;\n\n time_t argDate = 0;\n if (sscanf(date, \"%4d-%2d-%2d\", &year, &month, &day) == 3)\n {\n struct tm argTimeStruct = {0};\n argTimeStruct.tm_year = year;\n argTimeStruct.tm_mon = month;\n argTimeStruct.tm_mday = day;\n argDate = mktime(&argTimeStruct);\n }\n return argDate;\n}\n\n\n\n\nchar* dateToString(time_t date)\n{\n char* timeStr = calloc(20, sizeof(char));\n struct tm* timeinfo = localtime(&date);\n strftime(timeStr, 20, \"%F\", timeinfo);\n return timeStr;\n}\n\n\n\n\nvoid printInfo(const char* path, const struct stat* stats)\n{\n char* sysDate = dateToString((stats->st_mtime));\n\n printf( (S_ISDIR(stats->st_mode)) ? \"d\" : \"-\");\n printf( (stats->st_mode & S_IRUSR) ? \"r\" : \"-\");\n printf( (stats->st_mode & S_IWUSR) ? \"w\" : \"-\");\n printf( (stats->st_mode & S_IXUSR) ? \"x\" : \"-\");\n printf( (stats->st_mode & S_IRGRP) ? \"r\" : \"-\");\n printf( (stats->st_mode & S_IWGRP) ? \"w\" : \"-\");\n printf( (stats->st_mode & S_IXGRP) ? \"x\" : \"-\");\n printf( (stats->st_mode & S_IROTH) ? \"r\" : \"-\");\n printf( (stats->st_mode & S_IWOTH) ? 
\"w\" : \"-\");\n printf( (stats->st_mode & S_IXOTH) ? \"x\" : \"-\");\n\n printf(\"\\t%li\",stats->st_size);\n printf(\"\\t%s\", sysDate);\n printf(\"\\t%s\\n\", path);\n}\n\n\n\n\nint fn(const char* fullPath, const struct stat* stats, int flagType, struct FTW* ftwBuf)\n{\n if (flagType == FTW_F)\n {\n time_t argDate = stringToDate(date);\n time_t systemDate = stringToDate(dateToString(stats->st_mtime));\n\n if (!strcmp(operator, \"=\"))\n {\n if (difftime(argDate, systemDate) == 0)\n {\n printInfo(fullPath, stats);\n }\n }\n else if (!strcmp(operator, \">\"))\n {\n if (difftime(argDate, systemDate) < 0)\n {\n printInfo(fullPath, stats);\n }\n }\n else if (!strcmp(operator, \"<\"))\n {\n if (difftime(argDate, systemDate) > 0)\n {\n printInfo(fullPath, stats);\n }\n }\n }\n return 0;\n}\n\n\n\n\nvoid dirInfo_nftw(char* filePath)\n{\n char path[1000];\n nftw(realpath(filePath, path), fn, 20, FTW_PHYS);\n}\n\n\n\n\nvoid dirInfo_stat(char* filePath)\n{\n DIR* directory = opendir(filePath);\n if (directory == NULL)\n {\n printf(\"Directory error\");\n exit(-1);\n }\n\n struct dirent* fileIterator = readdir(directory);\n struct stat stats;\n\n while (fileIterator != NULL)\n {\n if (strcmp(fileIterator->d_name,\".\")==0 || strcmp(fileIterator->d_name,\"..\")==0) {fileIterator = readdir(directory); continue; }\n\n char buffer[1000];\n strcpy(buffer,filePath);\n strcat(buffer,\"/\");\n strcat(buffer,fileIterator->d_name);\n lstat(buffer, &stats);\n\n char path[1000];\n\n if((S_ISDIR(stats.st_mode)))\n {\n pid_t childPID = vfork();\n if (childPID < 0)\n {\n printf(\"Fork error\");\n exit(-1);\n }\n else if (childPID == 0) {\n dirInfo_stat(realpath(buffer, path));\n exit(0);\n }\n }\n\n if(!(S_ISREG(stats.st_mode)))\n {\n fileIterator = readdir(directory);\n continue;\n }\n\n time_t argDate = stringToDate(date);\n time_t systemDate = stringToDate(dateToString(stats.st_mtime));\n\n if (!strcmp(operator, \"=\"))\n {\n if (difftime(argDate, systemDate) != 0)\n {\n fileIterator = readdir(directory);\n continue;\n }\n }\n else if (!strcmp(operator, \">\"))\n {\n if (difftime(argDate, systemDate) >= 0)\n {\n fileIterator = readdir(directory);\n continue;\n }\n }\n else if (!strcmp(operator, \"<\"))\n {\n if (difftime(argDate, systemDate) <= 0)\n {\n fileIterator = readdir(directory);\n continue;\n }\n }\n\n printInfo(realpath(buffer, path), &stats);\n\n fileIterator = readdir(directory);\n }\n closedir(directory);\n}\n" }, { "alpha_fraction": 0.4844844937324524, "alphanum_fraction": 0.5065065026283264, "avg_line_length": 17.5, "blob_id": "1f64503cc2394891d9269922c21a6ca50551f334", "content_id": "dc52435f2171d584d3bb660d3ab8a7b08bf55099", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 999, "license_type": "permissive", "max_line_length": 48, "num_lines": 54, "path": "/SysOpy/lab5/src/zad2/slave.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <memory.h>\n#include <time.h>\n\nvoid slavLogic(char* fifoName, int N)\n{\n FILE* fifo;\n printf(\"%d\\n\", getpid());\n char date[50];\n char msg[255];\n srand(time(0));\n\n for (int i=0; i<N; i++)\n {\n sprintf(msg, \"%d\", getpid());\n fifo = fopen(fifoName, \"w\");\n\n FILE* datePipe = popen(\"date\", \"r\");\n fread(date, sizeof(char), 50, datePipe);\n pclose(datePipe);\n\n strcat(msg, \"\\t\");\n strcat(msg, date);\n\n fwrite(msg, sizeof(char), 255, fifo);\n fclose(fifo);\n\n sleep(rand()%(4) + 2);\n }\n // 
fifo = fopen(fifoName, \"w\");\n // fwrite(\"\\n\\n\", sizeof(char), 5, fifo);\n // fclose(fifo);\n}\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 3) return -1;\n int N = atoi(argv[2]);\n slavLogic(argv[1], N);\n return 0;\n}\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0)\n {\n printf(\"Error\");\n exit(-1);\n }\n}\n" }, { "alpha_fraction": 0.41948607563972473, "alphanum_fraction": 0.42933619022369385, "avg_line_length": 22.821428298950195, "blob_id": "b02db6a4e79ed439babbc91db6de235c0399ae83", "content_id": "d8193f44982a951e3d8c9b0afd79c586c466f985", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4670, "license_type": "permissive", "max_line_length": 77, "num_lines": 196, "path": "/SysOpy/lab6/src/zad1/server.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n#include <sys/types.h>\n#include <sys/ipc.h>\n#include <sys/msg.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <time.h>\n#include <string.h>\n#include <sys/shm.h>\n#include <unistd.h>\n\n#include \"properties.h\"\n\n\n\nint init()\n{\n key_t key = ftok(getenv(\"HOME\"), KEY_CHAR);\n int queue = msgget(key, IPC_CREAT | 0622);\n return queue;\n}\n\n\nstruct msgbuf {\n long mtype;\n pid_t pid;\n int key;\n char mtext[MAX_MSG_LENGTH];\n};\n\n\nstruct client\n{\n pid_t pid;\n int clientID;\n int queue;\n};\n\nchar* getDate()\n{\n time_t now;\n time(&now);\n char* result = calloc(50, sizeof(char));\n snprintf(result, MAX_MSG_LENGTH, \"%s\", ctime(&now));\n return result;\n}\n\n\nint getQueue(pid_t pid, struct client* clients, int numberOfClients)\n{\n for (int i=0; i<numberOfClients; i++)\n {\n if (clients[i].pid == pid) return clients[i].queue;\n }\n return -1;\n}\n\n\nvoid receive(int queue)\n{\n struct msgbuf msg;\n int clientID = 0;\n struct client* clients = calloc(0, sizeof(struct client));\n\n int closeFlag = 0;\n\n while (1)\n {\n msg.mtype = 0;\n msgrcv(queue, &msg, MAX_MSG_LENGTH, 0, IPC_NOWAIT);\n if (!msg.mtype && closeFlag) break;\n if (!msg.mtype) continue;\n usleep(10000);\n\n switch(msg.mtype)\n {\n case INIT:\n {\n printf(\"init\\t%d\\t%s\", msg.pid, getDate());\n\n int clientQueue = msgget(msg.key, 0);\n\n clientID++;\n clients = realloc(clients, sizeof(struct client) * clientID);\n clients[clientID - 1].pid = msg.pid;\n clients[clientID - 1].queue = clientQueue;\n clients[clientID - 1].clientID = clientID;\n\n msg.mtype = clientID;\n msgsnd(clientQueue, &msg, MAX_MSG_LENGTH, 0);\n\n break;\n }\n\n case CALC:\n {\n printf(\"calc\\t%d\\t%s\", msg.pid, getDate());\n int numbers[2];\n char op = *(strpbrk(msg.mtext, \"+-/*\"));\n numbers[0] = atoi(strtok(msg.mtext, \" +-*/\\n\"));\n numbers[1] = atoi(strtok(NULL, \"\\n\"));\n\n\n double result;\n switch(op)\n {\n case '+':\n {\n result = numbers[0] + numbers[1];\n break;\n }\n\n case '-':\n {\n result = numbers[0] - numbers[1];\n break;\n }\n\n case '*':\n {\n result = numbers[0] * numbers[1];\n break;\n }\n\n case '/':\n {\n result = (double)numbers[0] / (double)numbers[1];\n break;\n }\n }\n\n\n snprintf(msg.mtext, MAX_MSG_LENGTH, \"%f\", result);\n int clientQueue = getQueue(msg.pid, clients, clientID);\n msgsnd(clientQueue, &msg, MAX_MSG_LENGTH, 0);\n\n\n\n break;\n }\n\n case TIME:\n {\n printf(\"time\\t%d\\t%s\", msg.pid, getDate());\n time_t now;\n time(&now);\n\n int clientQueue = getQueue(msg.pid, clients, clientID);\n snprintf(msg.mtext, MAX_MSG_LENGTH, \"%s\", ctime(&now));\n msgsnd(clientQueue, &msg, MAX_MSG_LENGTH, 0);\n\n break;\n }\n\n case 
END:\n {\n printf(\"end\\t%d\\t%s\", msg.pid, getDate());\n closeFlag = 1;\n break;\n }\n\n case MIRROR:\n {\n printf(\"mirror\\t%d\\t%s\", msg.pid, getDate());\n char* result = calloc(strlen(msg.mtext) + 1, sizeof(char));\n\n for (int i=strlen(msg.mtext) - 1; i>=0; i--)\n {\n strcat(result, (char[2]) {(char)msg.mtext[i], '\\0'});\n }\n result[strlen(msg.mtext)] = '\\0';\n\n int clientQueue = getQueue(msg.pid, clients, clientID);\n snprintf(msg.mtext, MAX_MSG_LENGTH, \"%s\", result);\n msgsnd(clientQueue, &msg, MAX_MSG_LENGTH, 0);\n\n free(result);\n break;\n }\n\n default:\n {\n printf(\"Unknown msg type\\n\");\n exit(-1);\n }\n }\n }\n // struct msqid_ds *buf;\n msgctl(queue, IPC_RMID, NULL);\n}\n\n\nint main()\n{\n int queue = init();\n receive(queue);\n}\n" }, { "alpha_fraction": 0.6690211892127991, "alphanum_fraction": 0.6907164454460144, "avg_line_length": 25.426666259765625, "blob_id": "a09daad2d4cd5a858beac70923e361623f57cfbc", "content_id": "7ae3b3202d6b94b28053c9447d17928e0e6bfe30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1982, "license_type": "permissive", "max_line_length": 88, "num_lines": 75, "path": "/SysOpy/lab1/src/zad3b/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "ARGS = 2048 4096 1 1 1000 1 1000\nLIB_SRC = ../zad1/blocklibrary.c\nMAIN_SRC = ../zad2/tester.c\nMAIN_DYN_SRC = ../zad3a/tester_dynamic.c\n\nall: staticO0 staticO1 staticOs sharedO0 sharedO1 sharedOs dynamicO0 dynamicO1 dynamicOs\n\n\nstaticO0:\n\tgcc -c $(LIB_SRC)\n\tar rcs libblocklibrary.a blocklibrary.o\n\tgcc -c $(MAIN_SRC)\n\tgcc tester.o libblocklibrary.a -o tester_static\n\t./tester_static $(ARGS)\n\n\nstaticO1:\n\tgcc -c -O1 $(LIB_SRC)\n\tar rcs libblocklibrary.a blocklibrary.o\n\tgcc -c -O1 $(MAIN_SRC)\n\tgcc tester.o libblocklibrary.a -O1 -o tester_static\n\t./tester_static $(ARGS)\n\n\nstaticOs:\n\tgcc -c -Os $(LIB_SRC)\n\tar rcs libblocklibrary.a blocklibrary.o\n\tgcc -c -Os $(MAIN_SRC)\n\tgcc tester.o libblocklibrary.a -Os -o tester_static\n\t./tester_static $(ARGS)\n\n\nsharedO0:\n\tgcc -Wall -fPIC -c $(LIB_SRC)\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o\n\tgcc -Wl,-rpath=. -L. -o tester_shared $(MAIN_SRC) -lblocklibrary\n\t./tester_shared $(ARGS)\n\n\nsharedO1:\n\tgcc -Wall -fPIC -c $(LIB_SRC) -O1\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o -O1\n\tgcc -Wl,-rpath=. -L. -o tester_shared $(MAIN_SRC) -lblocklibrary -O1\n\t./tester_shared $(ARGS)\n\n\t\nsharedOs:\n\tgcc -Wall -fPIC -c $(LIB_SRC) -Os\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o -Os\n\tgcc -Wl,-rpath=. -L. -o tester_shared $(MAIN_SRC) -lblocklibrary -Os\n\t./tester_shared $(ARGS)\n\n\ndynamicO0:\n\tgcc -Wall -fPIC -c $(LIB_SRC)\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o\n\tgcc -L. -o tester_dynamic $(MAIN_DYN_SRC) -ldl -D DLL\n\t./tester_dynamic $(ARGS)\t\n\n\ndynamicO1:\n\tgcc -Wall -fPIC -c $(LIB_SRC) -O1\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o -O1\n\tgcc -L. -o tester_dynamic $(MAIN_DYN_SRC) -ldl -D DLL -O1\n\t./tester_dynamic $(ARGS)\n\n\ndynamicOs:\n\tgcc -Wall -fPIC -c $(LIB_SRC) -Os\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o -Os\n\tgcc -L. 
-o tester_dynamic $(MAIN_DYN_SRC) -ldl -D DLL -Os\n\t./tester_dynamic $(ARGS)\n\nclean: \n\trm -f *.o *.so *.a tester_dynamic tester_shared tester_static\n" }, { "alpha_fraction": 0.7011494040489197, "alphanum_fraction": 0.7011494040489197, "avg_line_length": 16.399999618530273, "blob_id": "cd4a0a04a75140fc839e658df173aab72fe55938", "content_id": "cb583b9a773613f4e9df1540a06619210d948a1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 87, "license_type": "permissive", "max_line_length": 57, "num_lines": 5, "path": "/SysOpy/lab9/src/zad2/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: main.c\n\tgcc -Wall main.c -o main -lpthread -g -fsanitize=address\n\nclean:\n\trm main\n" }, { "alpha_fraction": 0.5201793909072876, "alphanum_fraction": 0.5695067048072815, "avg_line_length": 15.307692527770996, "blob_id": "d5c7650c2a440abcff09ebd34ac789096f6be798", "content_id": "7e62bb3e248d53a7607220afe19e67268dc56bd8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "permissive", "max_line_length": 27, "num_lines": 13, "path": "/Python/lab1/src/p2.py", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "import sump as s\r\n\r\n# liczby doskonałe\r\nfor i in range(1,1000):\r\n\tif s.sump(i) == i:\r\n\t\tprint(i)\r\n\r\nprint(\"----------\")\r\n\r\n#liczby zaprzyjaźnione\r\nfor i in range(1,10000):\r\n\tif s.sump(s.sump(i)) == i:\r\n\t\tprint(i, s.sump(i))" }, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.6458333134651184, "avg_line_length": 8.600000381469727, "blob_id": "aea1e7acfbf2720cb48515298ed92ebbc49c340c", "content_id": "701e0d5085360893ffdb3bfb14b9f5e7c83c5051", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 48, "license_type": "permissive", "max_line_length": 17, "num_lines": 5, "path": "/SysOpy/lab4/src/zad2/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: zad2.c\n\tgcc -Wall zad2.c\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.6561869978904724, "alphanum_fraction": 0.6630390882492065, "avg_line_length": 19.9743595123291, "blob_id": "f55c17c3ac13fcbd6e6c06749dae003a6a6e3ed1", "content_id": "25131c65209357fad339a9da66e850ac16969ad2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2481, "license_type": "permissive", "max_line_length": 114, "num_lines": 117, "path": "/SysOpy/lab1/src/zad1/blocklibrary.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include<stdlib.h>\n#include<string.h>\n#include<math.h>\n\nchar staticArray[8192][8192];\n\n\n// dynamic allocation\nchar** createArray(int arraySize, int blockSize);\nvoid deleteArrayDynamic(char** array, int arraySize);\nvoid addBlockDynamic(char** array, char* block, int position, int blockSize);\nvoid deleteBlockDynamic(char** array, int position);\nchar* findBlockDyn(char** array, int position, int arraySize, int blockSize);\n\n\n// static allocation\nvoid addBlockStatic(char* block, int position, int blockSize);\nchar* findBlockStat(int position, int arraySize, int blockSize);\n\n\n// helper functions\nint sumChar(char* block, int blockSize);\n\n\n\nchar** createArray(int arraySize, int blockSize)\n{\n\tchar** arr = calloc(arraySize, sizeof(char*));\n\treturn arr;\n}\n\n\n\nvoid deleteArrayDynamic(char** array, int arraySize)\n{\n\tfor (int 
i=0; i<arraySize; i++)\n\t{\n\t\tfree(array[i]);\n\t}\n\tfree(array);\n}\n\n\n\nvoid addBlockStatic(char* block, int position, int blockSize)\n{\n\tstrncpy(block, staticArray[position], blockSize);\n}\n\n\n\nvoid addBlockDynamic(char** array, char* block, int position, int blockSize)\n{\n\tarray[position] = block;\n}\n\n\n\nvoid deleteBlockDynamic(char** array, int position)\n{\n\tfree(array[position]);\n\tarray[position] = NULL;\n}\n\n\n\nint sumChar(char* block, int blockSize)\n{\n\tint sum = 0;\n\tfor (int i=0; i<blockSize; i++)\n\t\tsum += (int)block[i];\n\treturn sum;\n}\n\n\n\nchar* findBlockDyn(char** array, int position, int arraySize, int blockSize)\n{\n\tif (array == NULL) return NULL;\n\tif (array[position] == NULL) return NULL;\n\t\n\tint first = 0;\n\twhile ((array[first] == NULL || first == position) && first < arraySize)\n\t{\n\t\tfirst++;\n\t}\n\n\tif (first == arraySize) return NULL;\n\t\n\tint min = abs(sumChar(array[first], blockSize) - sumChar(array[position], blockSize));\n\tint best = first;\n\tfor (int i=first+1; i<arraySize; i++)\n\t{\n\t\tif (array[i]!=NULL && i != position)\n\t\t{\n\t\t\tif (abs(sumChar(array[i], blockSize) - sumChar(array[position], blockSize)) < min) best = i;\n\t\t}\n\t}\n\treturn array[best];\n}\n\n\n\nchar* findBlockStat(int position, int arraySize, int blockSize)\n{\n int best = 0;\n\tif (position == 0) best = 1;\n\tint min = abs(sumChar(staticArray[best], blockSize) - sumChar(staticArray[position], blockSize));\n for (int i=1; i<arraySize; i++)\n {\n\t if (i != position)\n\t {\n\t\t if (abs(sumChar(staticArray[i], blockSize) - sumChar(staticArray[position], blockSize)) < min) best = i;\n\t\t}\n }\n return staticArray[best];\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.42890647053718567, "alphanum_fraction": 0.44825565814971924, "avg_line_length": 21.006452560424805, "blob_id": "df4d11b02140dbf2ae290f86e109064a8f16a4e4", "content_id": "3a9a45ec69ccaaa0d64b5a615b18a5ba93aad150", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3411, "license_type": "permissive", "max_line_length": 87, "num_lines": 155, "path": "/SysOpy/lab5/src/zad1/zad1.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <memory.h>\n#include <unistd.h>\n#include <libgen.h>\n#include <sys/types.h>\n #include <sys/wait.h>\n\n\nchar** prepareArguments(char *program, char* separator, int asciiz, int* length)\n{ \n char **arguments = NULL;\n char *p = strtok(program, separator);\n int n_spaces = 0;\n\n while (p)\n {\n arguments = realloc(arguments, sizeof(char *) * ++n_spaces);\n if (arguments == NULL) exit(-1);\n \n arguments[n_spaces - 1] = p;\n p = strtok(NULL, separator); \n }\n\n if (asciiz) // adding NULL at the end\n {\n arguments = realloc(arguments, sizeof(char *) * (n_spaces + 1));\n arguments[n_spaces] = 0;\n }\n \n for (int i = 0; i < n_spaces; i++) // deleting newlines\n\tif (strchr(arguments[i], '\\n') != NULL)\n\t{ \n\t\tchar* buf = calloc(strlen(arguments[i])+1, sizeof(char));\n\t\tfor (int j=0; j<strlen(arguments[i])-1; j++)\n\t\t\tbuf[j] = arguments[i][j];\t\t\n\t buf[strlen(arguments[i])-1] = '\\0';\n\t\targuments[i] = buf;\n\t}\n\n if (length != NULL) *length = n_spaces;\n return arguments;\n}\n\n\n\nvoid readAndExecute(char* fileName)\n{\n FILE* file = fopen(fileName, \"r\");\n if (file == NULL)\n {\n printf(\"File opening error\");\n exit(-1);\n }\n \n\n size_t bufSize = 100;\n char* buffer = 
calloc(bufSize, sizeof(char));\n\n \n\n while (getline(&buffer, &bufSize, file) != -1)\n {\n int n_spaces2;\n\n char** programs = prepareArguments(buffer, \"|\", 0, &n_spaces2);\n\t\n\n int **fd = calloc(n_spaces2 - 1, sizeof(int *));\n for (int i = 0; i < n_spaces2 - 1; i++)\n {\n fd[i] = calloc(2, sizeof(int));\n pipe(fd[i]);\n }\n\n\n for (int i=0; i<n_spaces2; i++)\n {\n\n char **arguments = prepareArguments(programs[i], \" \", 1, NULL);\n\n int result = 0;\n\n pid_t child = fork();\n\n if (child < 0) exit(-1);\n else if (child == 0)\n {\n if (i != 0)\n {\n dup2(fd[i-1][0], 0); // previous output -> current input\n close(fd[i-1][0]);\n close(fd[i-1][1]);\n }\n if (i != n_spaces2 - 1)\n {\n dup2(fd[i][1], 1); // current output -> next input\n close(fd[i][0]);\n close(fd[i][1]);\n\n }\n\n\n\n if (execv(arguments[0], arguments) == -1)\n { \n\t\t if (execvp(basename(arguments[0]), arguments) == -1)\n {\n\t\t\t\texit(-1);\n }\n }\n }\n else\n { \n//\t\tusleep(1000);\n if (WIFEXITED(result) && WEXITSTATUS(result) != 0)\n {\n printf(\"!!!Operation terminated!!!\\nError in: %s\\n\", arguments[0]);\n exit(-1);\n }\n }\n }\n\n free(programs);\n }\n\n\n free(buffer);\n fclose(file);\n}\n\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 2) return -1;\n char* fileName = argv[1]; \n readAndExecute(fileName);\n return 0;\n}\n\n\n\nvoid printHelp()\n{\n printf(\"Possible arguments:\\n\\tfilePath\\n\");\n}\n\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0) printHelp();\n return 0;\n}\n" }, { "alpha_fraction": 0.761403501033783, "alphanum_fraction": 0.761403501033783, "avg_line_length": 22.75, "blob_id": "cf5f3c5497e3b5d9a47ca0ccd67fd8b7fb70e876", "content_id": "1477eeff47f2071164d17fa751cf7ad402052561", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 285, "license_type": "permissive", "max_line_length": 55, "num_lines": 12, "path": "/SysOpy/lab1/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: libraryStatic libraryShared\n\nlibraryStatic: blocklibrary.c\n\tgcc -Wall -c blocklibrary.c\n\tar rcs libblocklibrary.a blocklibrary.o\n\nlibraryShared: blocklibrary.c\n\tgcc -Wall -fPIC -c blocklibrary.c\n\tgcc -Wall -o libblocklibrary.so -shared blocklibrary.o\n\nclean: \n\trm -f *.o *.a *.so\n" }, { "alpha_fraction": 0.4488251209259033, "alphanum_fraction": 0.4656994044780731, "avg_line_length": 23.022727966308594, "blob_id": "c9b3468b29264af5d8c9c4b5126e6a206528a4b1", "content_id": "c2f8660fa836e27640b0a39940e665a6ca422149", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6341, "license_type": "permissive", "max_line_length": 120, "num_lines": 264, "path": "/SysOpy/lab10/src/zad1/server.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <sys/un.h>\n#include <netinet/in.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <time.h>\n#include <sys/epoll.h>\n#include <pthread.h>\n#include <signal.h>\n\n\nstruct node\n{\n char* name;\n int fd;\n int num;\n};\n\n\nint numOfClients = 0;\nint maxEvenets = 100;\nint clusterSize = 20;\nint socPoll;\n\n\nstruct node* clients;\n\n\nint initLocal(char* path)\n{\n int listenfd = 0;\n struct sockaddr_un serv_addr;\n\n listenfd = socket(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0);\n\n serv_addr.sun_family = AF_UNIX;\n strcpy(serv_addr.sun_path, path);\n\n unlink(path);\n bind(listenfd, (struct 
sockaddr*)&serv_addr, sizeof(serv_addr));\n listen(listenfd, 10);\n\n return listenfd;\n}\n\n\nint initNet(int port)\n{\n int listenfd = 0;\n struct sockaddr_in serv_addr;\n\n listenfd = socket(AF_INET, SOCK_STREAM, 0);\n int option = 1;\n setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &option, sizeof(option));\n\n serv_addr.sin_family = AF_INET;\n serv_addr.sin_addr.s_addr = htonl(INADDR_ANY);\n serv_addr.sin_port = htons(port);\n\n bind(listenfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));\n listen(listenfd, 10);\n\n return listenfd;\n}\n\n\nstruct soc\n{\n int port;\n char* path;\n};\n\n\nvoid* monitor(void* arg) {\n\n struct soc soc = *(struct soc*)arg;\n socPoll = epoll_create1(0);\n\n struct epoll_event event;\n\n int netFd = initNet(soc.port);\n event.data.fd = netFd;\n event.events = EPOLLIN | EPOLLET;\n epoll_ctl(socPoll, EPOLL_CTL_ADD, netFd, &event);\n\n int localFd = initLocal(soc.path);\n event.data.fd = localFd;\n event.events = EPOLLIN | EPOLLET;\n epoll_ctl(socPoll, EPOLL_CTL_ADD, localFd, &event);\n\n struct epoll_event *events = calloc(maxEvenets, sizeof(struct epoll_event));\n\n char readBuff[128];\n\n while (1)\n {\n int n = epoll_wait(socPoll, events, maxEvenets, -1);\n for (int i = 0; i < n; i++)\n {\n for (int j=0; j<128; j++) readBuff[j] = '\\0';\n\n if (events[i].data.fd == netFd || events[i].data.fd == localFd)\n {\n int clientFD = accept(events[i].data.fd, (struct sockaddr *) NULL, 0);\n if (clientFD == -1) continue;\n\n recv(clientFD, readBuff, 128, 0);\n if (readBuff[0] == '1')\n {\n char *read2 = calloc(128, sizeof(char));\n strcpy(read2, readBuff);\n\n char *name = strtok(read2 + sizeof(char), \"\\0\");\n\n int flag = 1;\n for (int j = 0; j < clusterSize; j++)\n {\n if (!strcmp(clients[j].name, name))\n {\n flag = 0;\n }\n }\n\n if (flag == 1)\n {\n for (int j=0; j<clusterSize; j++)\n {\n if (clients[j].fd == -1)\n {\n clients[j].name = name;\n clients[j].fd = clientFD;\n clients[j].num = numOfClients;\n break;\n }\n }\n printf(\"Node up: %d (%s)\\n\", numOfClients, name);\n numOfClients++;\n\n event.data.fd = clientFD;\n event.events = EPOLLIN | EPOLLET;\n epoll_ctl(socPoll, EPOLL_CTL_ADD, clientFD, &event);\n\n send(clientFD, \"1Y\", 2, MSG_DONTWAIT);\n }\n else\n {\n write(clientFD, \"1N\", 2);\n close(clientFD);\n }\n }\n else\n {\n printf(\"Unknown message\");\n }\n\n }\n else\n {\n recv(events[i].data.fd, readBuff, 128, 0);\n if (readBuff[0] == '3')\n {\n printf(\"Result: %s, Node: %d\\n\", strtok(readBuff + 2 * sizeof(char), \"\\0\"), (int)(readBuff[1]) - 1);\n }\n }\n }\n\n usleep(1000);\n }\n}\n\n\nvoid* ping(void* a)\n{\n struct epoll_event event;\n while (1)\n {\n sleep(1);\n for (int i = 0; i < clusterSize; i++)\n {\n if (clients[i].fd != -1 && send(clients[i].fd, \"2\", 2, MSG_NOSIGNAL) == -1)\n {\n printf(\"Node down: %d\\n\", clients[i].num);\n\n event.data.fd = clients[i].fd;\n event.events = EPOLLIN | EPOLLET;\n epoll_ctl(socPoll, EPOLL_CTL_DEL, clients[i].fd, &event);\n\n close(clients[i].fd);\n clients[i].fd = -1;\n clients[i].name = \"\";\n clients[i].num = -1;\n }\n }\n }\n}\n\n\nvoid* calc(void* a)\n{\n srand(time(0));\n char* buf = calloc(128, sizeof(char));\n size_t size = 127;\n while(1)\n {\n char* msg = calloc(130, sizeof(char));\n strcat(msg, \"3\");\n getline(&buf, &size, stdin);\n\n int target;\n int p;\n while ((target = clients[p = rand()%clusterSize].fd) == -1);\n\n char pos[] = {(char)(p + 1), '\\0'};\n strcat(msg, pos);\n strcat(msg, buf);\n\n write(target, msg, 130);\n free(msg);\n usleep(1000);\n }\n}\n\n\nvoid 
signalHandling(int sig)\n{\n for (int i=0; i<clusterSize; i++)\n {\n close(clients[i].fd);\n }\n free(clients);\n printf(\"\\n\");\n exit(0);\n}\n\n\nint main(int argc, char** argv)\n{\n clients = calloc(clusterSize, sizeof(struct node));\n for (int i=0; i<clusterSize; i++)\n {\n clients[i].name = \"\";\n clients[i].fd = -1;\n clients[i].num = -1;\n }\n\n signal(SIGINT, signalHandling);\n\n pthread_t watcher, pinger, sender;\n\n struct soc* s = calloc(1, sizeof(struct soc));\n s->path = argv[2];\n s->port = atoi(argv[1]);\n\n pthread_create(&watcher, NULL, monitor, s);\n pthread_create(&pinger, NULL, ping, NULL);\n pthread_create(&sender, NULL, calc, NULL);\n\n pthread_join(watcher, NULL);\n pthread_join(pinger, NULL);\n pthread_join(sender, NULL);\n\n return 0;\n}" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6386554837226868, "avg_line_length": 28.75, "blob_id": "1a2c6e282e51e43adf031c3c2bd4ac00dc055006", "content_id": "bd5ee63cdd09471c450ecc80a32d53cd3a90f1c1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 119, "license_type": "permissive", "max_line_length": 65, "num_lines": 4, "path": "/SysOpy/lab6/src/zad2/properties.h", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n#define SERVER \"/server\"\n#define MAX_MSG_LENGTH 128\n\nenum msgType {INIT = 1, MIRROR = 2, TIME = 3, CALC = 4, END = 5};" }, { "alpha_fraction": 0.7175572514533997, "alphanum_fraction": 0.7175572514533997, "avg_line_length": 17.714284896850586, "blob_id": "294cbc3bf16e3e3573821ebf87c92ffd905165c0", "content_id": "1cf779817aa7bfb87b57439a4059a7954b600095", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 131, "license_type": "permissive", "max_line_length": 39, "num_lines": 7, "path": "/SysOpy/lab8/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: main.c generator.c\n\tgcc -Wall main.c -o main -lpthread -lm\n\tgcc -Wall generator.c -o generator\n\nclean:\n\trm main\n\trm generator\n" }, { "alpha_fraction": 0.5293945670127869, "alphanum_fraction": 0.5390465259552002, "avg_line_length": 19.72121238708496, "blob_id": "8f41c5694c1dcb1378f1c773370ceada2d783c8b", "content_id": "22287bb02e21e1fdc84e1516a11e982a918a9d28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3419, "license_type": "permissive", "max_line_length": 99, "num_lines": 165, "path": "/SysOpy/lab7/src/zad2/barber.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <semaphore.h>\n#include <sys/param.h>\n#include <sys/sem.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <sys/msg.h>\n#include <unistd.h>\n#include <time.h>\n#include <sys/mman.h>\n#include <fcntl.h>\n\n\n\nenum status\n{\n SLEEP,\n WAKES,\n SITS,\n SITS_FIRST,\n START,\n END,\n LEAVES,\n INVITES\n};\n\nstruct data\n{\n int waitingClients;\n int queueLength;\n enum status status;\n int clientPid;\n int barberPid;\n};\n\n\n\nstruct data* cMemPtr;\nsem_t* sem;\nint queue;\nint commonMemory;\n\n\nlong getTime()\n{\n struct timespec times;\n\n clock_gettime(CLOCK_MONOTONIC, &times);\n return times.tv_nsec;\n}\n\n\nvoid barberLogic()\n{\n struct sembuf* sops = calloc(1, sizeof(struct sembuf));\n sops[0].sem_flg = 0;\n\n cMemPtr->barberPid = getpid();\n\n struct msgbuf\n {\n long mtype;\n pid_t pid;\n char mtext[1];\n };\n\n struct msgbuf msg;\n msg.mtype = 1;\n\n\n while(1)\n {\n sem_wait(sem);\n\n\n 
if (cMemPtr->status == WAKES && cMemPtr->barberPid == getpid())\n {\n cMemPtr->status = SITS_FIRST;\n printf(\"Barber wakes up\\t\\t\\t\\t%ld\\n\", getTime());\n sem_post(sem);\n continue;\n }\n else if (cMemPtr->status == START && cMemPtr->barberPid == getpid())\n {\n cMemPtr->status = END;\n printf(\"Barber starts cutting\\t\\t%d\\t%ld\\n\", cMemPtr->clientPid, getTime());\n sem_post(sem);\n continue;\n }\n else if (cMemPtr->status == END && cMemPtr->barberPid == getpid())\n {\n cMemPtr->status = LEAVES;\n printf(\"Barber ends cutting\\t\\t%d\\t%ld\\n\", cMemPtr->clientPid, getTime());\n sem_post(sem);\n continue;\n }\n else if (cMemPtr->status == INVITES && cMemPtr->barberPid == getpid())\n {\n // usleep(1000);\n if (cMemPtr->waitingClients > 0)\n {\n msgrcv(queue, &msg, 10, 0, IPC_NOWAIT);\n cMemPtr->clientPid = msg.pid;\n cMemPtr->status = SITS;\n printf(\"Barber invites next client\\t%d\\t%ld\\n\", msg.pid, getTime());\n }\n else\n {\n cMemPtr->status = SLEEP;\n cMemPtr->clientPid = 0;\n printf(\"Barber falls asleep\\t\\t\\t%ld\\n\", getTime());\n }\n sem_post(sem);\n continue;\n }\n else\n {\n sem_post(sem);\n }\n }\n}\n\n\n\nvoid signalHandler(int sig)\n{\n msgctl(queue, IPC_RMID, NULL);\n\n munmap(cMemPtr, sizeof(struct data));\n shm_unlink(\"mem\");\n\n sem_close(sem);\n\n exit(0);\n}\n\n\n\nint main(int argc, char** argv)\n{\n\n struct data sharedData;\n sharedData.status = INVITES;\n sharedData.waitingClients = 0;\n sharedData.queueLength = atoi(argv[1]);\n sharedData.clientPid = 0;\n\n\n commonMemory = shm_open(\"mem\", O_CREAT | O_RDWR, 0622);\n ftruncate(commonMemory, sizeof(struct data));\n cMemPtr = malloc(sizeof(struct data));\n cMemPtr = mmap(NULL, sizeof(struct data), PROT_READ | PROT_WRITE, MAP_SHARED, commonMemory, 0);\n\n key_t key = ftok(getenv(\"HOME\"), 'c');\n queue = msgget(key, IPC_CREAT | 0622);\n// printf(\"%d\\n\", queue);\n\n sem = sem_open(\"sem\", O_CREAT | O_RDWR, 0622, 1);\n sem_post(sem);\n\n *cMemPtr = sharedData;\n\n signal(SIGINT, signalHandler); //TODO switch to SIGTERM\n\n barberLogic();\n}\n" }, { "alpha_fraction": 0.5925925970077515, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 6.714285850524902, "blob_id": "e372c0b1fe903b056b4f1bcb4f9b644d7d7b5d95", "content_id": "74cc8b3d7ce49abb23dc92bac469f7e4e47b18e7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 54, "license_type": "permissive", "max_line_length": 11, "num_lines": 7, "path": "/SysOpy/lab4/src/zad1/script", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwhile true \ndo\n\tdate '+%T'\n\tsleep 1\ndone\n" }, { "alpha_fraction": 0.5909090638160706, "alphanum_fraction": 0.6136363744735718, "avg_line_length": 6.166666507720947, "blob_id": "613e0530305882e91abb881a590441ce5f34aa71", "content_id": "eff6f7f3d0d080c60267f79888693d8f0d704954", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 44, "license_type": "permissive", "max_line_length": 18, "num_lines": 6, "path": "/SysOpy/lab3/src/zad3a/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\nall:\n\tgcc -Wall zad3a.c\n\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.47519582509994507, "alphanum_fraction": 0.5065274238586426, "avg_line_length": 14.739130020141602, "blob_id": "1142add1e5daabb317affd737c1b375826107918", "content_id": "48a99ebec88b241370248cc600b8a28edb275417", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "permissive", "max_line_length": 50, "num_lines": 23, "path": "/Python/lab1/src/sump.py", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "def sump(n):\r\n\tsum = 1\r\n\tp = 2\r\n\twhile p*p <= n:\r\n\t\tif n%p == 0: sum = sum + p + n//p\r\n\t\tp += 1\r\n\tif p*p == n:\r\n\t\tsum += p\r\n\treturn sum\r\n\t\r\n\t\r\ndef sump2(n):\r\n\t\"Funkcja zwraca sumę podzielnikow\" # dokumentacja\r\n\tsum = 0\r\n\tp = 1\r\n\twhile p < n:\r\n\t\tif n%p == 0: sum = sum + p\r\n\t\tp += 1\r\n\treturn sum\r\n\r\n\t\r\nif __name__ == \"__main__\" : sump(120)\r\n# kod nie wykonuje się przy każdym imporcie" }, { "alpha_fraction": 0.5673245787620544, "alphanum_fraction": 0.5811999440193176, "avg_line_length": 19.472000122070312, "blob_id": "83af28cf268bd4a0351beb325e0b56c89e7cc4f9", "content_id": "048e310c3c099ecd07153d40895cda1600f2971d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5117, "license_type": "permissive", "max_line_length": 100, "num_lines": 250, "path": "/SysOpy/lab9/src/zad2/main.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <pthread.h>\n#include <memory.h>\n#include <unistd.h>\n#include <semaphore.h>\n\n\nstruct conf\n{\n int numberOfProducers;\n int numberOfConsumers;\n int bufferLength;\n char* fileName;\n int stringLength;\n int searchMode;\n int verbalMode;\n int timeout;\n};\n\n\nstruct line\n{\n char* text;\n int lineNumber;\n};\n\n\nstruct conf CONF;\nstruct line* BUFFER;\nFILE* SOURCE;\nint endOfFile = 0;\n\nint consumerPos = 0;\nint producerPos = 0;\nint numberOfElements = 0;\nint lineNumber = 0;\n\nsem_t sem, emptySem, fullSem;\n\n\nstruct conf* parseConf(char* confFileName)\n{\n struct conf* configuration = calloc(1, sizeof(struct conf));\n\n FILE* confFile = fopen(confFileName, \"r\");\n\n fscanf(confFile, \"%d\", &configuration->numberOfProducers);\n fscanf(confFile, \"%d\", &configuration->numberOfConsumers);\n fscanf(confFile, \"%d\", &configuration->bufferLength);\n char* buffer = calloc(1000, sizeof(char));\n fscanf(confFile, \"%s\", buffer);\n (configuration->fileName) = buffer;\n fscanf(confFile, \"%d\", &configuration->stringLength);\n fscanf(confFile, \"%d\", &configuration->searchMode);\n fscanf(confFile, \"%d\", &configuration->verbalMode);\n fscanf(confFile, \"%d\", &configuration->timeout);\n\n fclose(confFile);\n\n return configuration;\n}\n\n\nint addToBuffer()\n{\n sem_wait(&sem);\n\n if (numberOfElements == CONF.bufferLength)\n {\n sem_post(&sem);\n sem_wait(&fullSem);\n sem_post(&fullSem);\n return 0;\n }\n\n char* buffer = calloc(1000, sizeof(char));\n\n if (fgets(buffer, 1000, SOURCE))\n {\n struct line* currentLine = calloc(1, sizeof(struct line));\n currentLine->text = calloc(1000, sizeof(char));\n lineNumber++;\n currentLine->lineNumber = lineNumber;\n strcpy(currentLine->text, buffer);\n\n BUFFER[producerPos] = *currentLine;\n\n if (CONF.verbalMode) printf(\"Adding to buffer: %s\", currentLine->text);\n\n numberOfElements++;\n producerPos++;\n if (producerPos == CONF.bufferLength)\n producerPos = 0;\n\n free(currentLine);\n }\n else if (CONF.timeout == 0)\n {\n endOfFile = 1;\n free(buffer);\n sem_post(&emptySem);\n sem_post(&sem);\n\n return -1;\n }\n\n if (numberOfElements == 1)\n {\n sem_post(&emptySem);\n }\n\n free(buffer);\n sem_post(&sem);\n\n return 0;\n}\n\n\nenum searchMode\n{\n EQUAL = 0,\n LESSER = 1,\n GREATER = 
2\n};\n\n\nint removeFromBuffer()\n{\n sem_wait(&sem);\n\n if (numberOfElements == 0 && endOfFile == 0)\n {\n sem_post(&sem);\n sem_wait(&emptySem);\n sem_post(&emptySem);\n\n return 0;\n }\n\n if (endOfFile == 1 && numberOfElements == 0)\n {\n sem_post(&sem);\n return -1;\n }\n\n size_t length = strlen(BUFFER[consumerPos].text) - 1;\n\n if (CONF.verbalMode)\n printf(\"Removing from buffer: %s\", BUFFER[consumerPos].text);\n\n if ((length == CONF.stringLength && CONF.searchMode == EQUAL) ||\n (length < CONF.stringLength && CONF.searchMode == LESSER) ||\n (length > CONF.stringLength && CONF.searchMode == GREATER))\n {\n printf(\"Line: %d\\tText: %s\", BUFFER[consumerPos].lineNumber, BUFFER[consumerPos].text);\n }\n\n free(BUFFER[consumerPos].text);\n consumerPos++;\n if (consumerPos == CONF.bufferLength)\n consumerPos = 0;\n numberOfElements--;\n\n if (numberOfElements == CONF.bufferLength - 1)\n {\n sem_post(&fullSem);\n }\n\n sem_post(&sem);\n\n return 0;\n}\n\n\nvoid* producerLogic(void* arg)\n{\n while(1)\n {\n if (addToBuffer() == -1 && CONF.timeout == 0)\n {\n perror(\"p\");\n return NULL;\n }\n usleep(500);\n }\n}\n\n\nvoid* consumerLogic(void* arg)\n{\n while(1)\n {\n if (removeFromBuffer() == -1 && CONF.timeout == 0)\n {\n perror(\"c\");\n return NULL;\n }\n usleep(1000);\n }\n}\n\n\nint main(int argc, char** argv)\n{\n if (argc != 2) return -1;\n char* confFileName = argv[1];\n\n struct conf* confPtr = parseConf(confFileName);\n CONF = *confPtr;\n free(confPtr);\n\n sem_init(&sem, 0, 1);\n sem_init(&emptySem, 0, 0);\n sem_init(&fullSem, 0, 1);\n\n\n BUFFER = calloc(CONF.bufferLength, sizeof(struct line));\n\n SOURCE = fopen(CONF.fileName, \"r\");\n\n pthread_t* threads = calloc(CONF.numberOfConsumers + CONF.numberOfProducers, sizeof(pthread_t));\n\n\n for (int i=0; i<CONF.numberOfConsumers; i++)\n {\n pthread_create(&threads[i], NULL, consumerLogic, \"b\");\n }\n for (int i=0; i<CONF.numberOfProducers; i++)\n {\n pthread_create(&threads[i+CONF.numberOfConsumers], NULL, producerLogic, \"a\");\n }\n\n if (CONF.timeout == 0)\n {\n for (int i=0; i< CONF.numberOfConsumers + CONF.numberOfProducers; i++)\n pthread_join(threads[i], NULL);\n }\n else\n {\n sleep(CONF.timeout);\n for (int i=0; i< CONF.numberOfConsumers + CONF.numberOfProducers; i++)\n pthread_cancel(threads[i]);\n }\n\n free(threads);\n fclose(SOURCE);\n\n return 0;\n}" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.6206896305084229, "avg_line_length": 27.75, "blob_id": "065b419818fccea3f7d564c3988a5f95f1fe9e7d", "content_id": "8766bb6b5d206ef3b236edb22b32d4f972225db8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 116, "license_type": "permissive", "max_line_length": 65, "num_lines": 4, "path": "/SysOpy/lab6/src/zad1/properties.h", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n#define KEY_CHAR 'c'\n#define MAX_MSG_LENGTH 128\n\nenum msgType {INIT = 1, MIRROR = 2, TIME = 3, CALC = 4, END = 5};\n" }, { "alpha_fraction": 0.47720929980278015, "alphanum_fraction": 0.4924651086330414, "avg_line_length": 22.16810417175293, "blob_id": "b26edceb6b238d13fb07bec4c31b79821543e11a", "content_id": "10b7f668e4ff3257690dae687d14b940843e0d61", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5375, "license_type": "permissive", "max_line_length": 132, "num_lines": 232, "path": "/SysOpy/lab4/src/zad2/zad2.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": 
"#include <stdio.h>\n#include <stdlib.h>\n#include <memory.h>\n#include <unistd.h>\n#include <time.h>\n#include <signal.h>\n#include <sys/wait.h>\n\n\nenum stats {PID = 1, ASK = 2, PERM = 4, SIG = 8, KILL = 16};\n\nint LEFTREQUESTS;\nint REQUESTS;\npid_t* PENDING;\nint OPTIONS = 0;\n\n\nint countEnumValue(char** args, int howMany)\n{\n int result = 0;\n for (int i=0; i<howMany; i++)\n {\n if (args[i] == NULL) return result;\n if (!strcmp(args[i], \"pid\")) result += PID;\n else if (!strcmp(args[i], \"ask\")) result += ASK;\n else if (!strcmp(args[i], \"perm\")) result += PERM;\n else if (!strcmp(args[i], \"sig\")) result += SIG;\n else if (!strcmp(args[i], \"kill\")) result += KILL;\n else result += 0;\n }\n\n return result;\n}\n\n\n\nvoid signalHandling(int sig, siginfo_t* stats, void* data)\n{\n switch(sig)\n {\n case SIGUSR2:\n {\n int sigNumber = rand()%32;\n usleep((rand()%500 + 250)*1000);\n kill(getppid(), SIGRTMIN+sigNumber);\n\n break;\n }\n\n case SIGINT:\n {\n printf(\"Terminated\\n\");\n kill(-getpid(), SIGKILL);\n\n break;\n }\n\n case SIGUSR1:\n {\n if (OPTIONS & ASK) printf(\"Request received: PID = %d\\n\", stats->si_pid);\n\n LEFTREQUESTS--;\n if (LEFTREQUESTS >= 0)\n {\n PENDING[LEFTREQUESTS] = stats->si_pid;\n }\n else\n {\n if (OPTIONS & PERM) printf(\"Permission given: PID = %d\\n\", stats->si_pid);\n kill(stats->si_pid, SIGUSR2);\n }\n\n if (LEFTREQUESTS == 0)\n {\n for (int i=0; i<REQUESTS; i++)\n {\n if (OPTIONS & PERM) printf(\"Permission given: PID = %d\\n\", PENDING[i]);\n kill(PENDING[i], SIGUSR2);\n }\n }\n\n break;\n }\n\n default:\n {\n if (sig >= SIGRTMIN && sig <= SIGRTMAX)\n {\n if (OPTIONS & SIG) printf(\"Received real-time signal: PID = %d, Signal number = %d\\n\", stats->si_pid, sig);\n }\n else\n {\n printf(\"Unknown signal\\n\");\n exit(-1);\n }\n\n break;\n }\n }\n}\n\n\n\nvoid childLabour(sigset_t mask)\n{\n srand(time(NULL) ^ (getpid()<<16));\n int sleepLength = rand()%11;\n sleep(sleepLength);\n\n sigset_t newMask = mask;\n sigdelset(&newMask, SIGUSR2);\n usleep((rand()%1000)*1000);\n kill(getppid(), SIGUSR1);\n\n sigsuspend(&newMask);\n exit(sleepLength);\n}\n\n\n\nvoid makeChildren(int numberOfChildren)\n{\n pid_t child = 1;\n\n for (int i=0; i<numberOfChildren && child>0; i++)\n {\n child = fork();\n if ((OPTIONS & PID) && (child > 0)) printf(\"Creating process %d: PID = %d\\n\", i+1, child);\n if (child == 0) usleep(1e5);\n }\n\n sigset_t fullMask;\n sigfillset(&fullMask);\n\n struct sigaction act;\n act.sa_sigaction = signalHandling;\n act.sa_mask = fullMask;\n act.sa_flags = SA_SIGINFO;\n\n if (child < 0)\n {\n printf(\"Fork error\");\n exit(-1);\n }\n else if (child == 0)\n {\n sigaction(SIGUSR2, &act, NULL);\n childLabour(fullMask);\n }\n else\n {\n sigdelset(&act.sa_mask, SIGINT);\n sigaction(SIGUSR1, &act, NULL);\n sigaction(SIGINT, &act, NULL);\n for (int i=0; i<32; i++)\n {\n sigaction(SIGRTMIN+i, &act, NULL);\n }\n\n int children = numberOfChildren;\n int result;\n pid_t childPID;\n\n struct exitStatus\n {\n pid_t pid;\n int status;\n };\n\n struct exitStatus* exitArray = calloc(numberOfChildren, sizeof(exitArray));\n struct exitStatus oneProcess;\n\n while (children > 0)\n {\n if ((childPID = wait(&result)) > 0)\n {\n oneProcess.pid = childPID;\n oneProcess.status = WEXITSTATUS(result);\n exitArray[numberOfChildren - children] = oneProcess;\n children--;\n }\n }\n\n if (OPTIONS & KILL)\n {\n for (int i = 0; i < numberOfChildren; i++)\n {\n printf(\"Exiting process: PID = %d, Status = %d\\n\", exitArray[i].pid, exitArray[i].status);\n 
}\n }\n }\n}\n\n\n\nint parse(int argc, char** argv)\n{\n if (argc < 3) return -1;\n if (argc > 8) return -1;\n int numberOfChildren;\n int numberOfRequests;\n if (sscanf(argv[1], \"%d\", &numberOfChildren) < 1) return -1;\n if (sscanf(argv[2], \"%d\", &numberOfRequests) < 1) return -1;\n\n char** stats = calloc(5, sizeof(char*));\n for (int i=3; argv[i]!=NULL; i++)\n {\n char* arg = argv[i];\n if (strcmp(arg, \"pid\") && strcmp(arg, \"ask\") && strcmp(arg, \"perm\") && strcmp(arg, \"sig\") && strcmp(arg, \"kill\")) return -1;\n stats[i-3] = calloc(strlen(argv[i]), sizeof(char));\n strcpy(stats[i-3], argv[i]);\n }\n\n LEFTREQUESTS = REQUESTS = numberOfRequests;\n PENDING = calloc(LEFTREQUESTS, sizeof(pid_t));\n OPTIONS = countEnumValue(stats, argc-3);\n\n makeChildren(numberOfChildren);\n\n return 0;\n}\n\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) == -1)\n {\n printf(\"Parsing error\");\n exit(-1);\n }\n}\n" }, { "alpha_fraction": 0.5260134935379028, "alphanum_fraction": 0.537162184715271, "avg_line_length": 19.97163200378418, "blob_id": "73e171b5862e97c2c64bed799d41a9e572ba7908", "content_id": "a78c43f9c6d764f2a4a45011130a81e31f889d50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2960, "license_type": "permissive", "max_line_length": 77, "num_lines": 141, "path": "/SysOpy/lab6/src/zad1/client.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n#include <sys/types.h>\n#include <sys/ipc.h>\n#include <sys/msg.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <time.h>\n#include <string.h>\n#include <signal.h>\n\n\n#include \"properties.h\"\n\n\n\nstruct queueInfo\n{\n key_t key;\n int queue;\n};\n\nstruct queueInfo init()\n{\n srand(time(0));\n key_t key = ftok(getenv(\"HOME\"), (char)rand()%(256));\n int queue = msgget(key, IPC_CREAT | 0622);\n struct queueInfo info = {key, queue};\n return info;\n}\n\n\nstruct msgbuf {\n long mtype;\n pid_t pid;\n int key;\n char mtext[MAX_MSG_LENGTH];\n};\n\n\n\nkey_t connect(struct queueInfo info)\n{\n key_t key = ftok(getenv(\"HOME\"), KEY_CHAR);\n int queue = msgget(key, 0);\n struct msgbuf msg;\n msg.mtype = INIT;\n msg.pid = getpid();\n msg.key = info.key;\n msgsnd(queue, &msg, MAX_MSG_LENGTH, 0);\n\n while(!msgrcv(info.queue, &msg, MAX_MSG_LENGTH, 0, MSG_NOERROR));\n printf(\"Connected!\\tQueueID = %d\\n\", info.queue);\n return key;\n}\n\n\n\nvoid sendMessages(FILE* fd, key_t key, struct queueInfo info)\n{\n int queue = msgget(key, 0);\n struct msgbuf msg;\n msg.pid = getpid();\n msg.key = 0;\n char* buffer = calloc(MAX_MSG_LENGTH, sizeof(char));\n size_t length = MAX_MSG_LENGTH;\n char* arguments[2];\n\n while(1)\n {\n getline(&buffer, &length, fd);\n\n arguments[0] = strtok(buffer, \" \\n\");\n arguments[1] = strtok(NULL, \"\\n\");\n\n if (arguments[0] == NULL) continue;\n\n\n if (!strcmp(arguments[0], \"TIME\"))\n {\n msg.mtype = TIME;\n msgsnd(queue, &msg, MAX_MSG_LENGTH, 0);\n\n while(!msgrcv(info.queue, &msg, MAX_MSG_LENGTH, 0, MSG_NOERROR));\n printf(\"%s\", msg.mtext);\n }\n\n else if (!strcmp(arguments[0], \"END\"))\n {\n msg.mtype = END;\n msgsnd(queue, &msg, MAX_MSG_LENGTH, 0);\n // struct msqid_ds *buf = NULL;\n msgctl(info.queue, IPC_RMID, NULL);\n break;\n }\n\n else if (!strcmp(arguments[0], \"CALC\"))\n {\n msg.mtype = CALC;\n strcpy(msg.mtext, arguments[1]);\n msgsnd(queue, &msg, MAX_MSG_LENGTH, 0);\n\n\n while(!msgrcv(info.queue, &msg, MAX_MSG_LENGTH, 0, MSG_NOERROR));\n printf(\"%s\\n\", 
msg.mtext);\n\n }\n\n else if (!strcmp(arguments[0], \"MIRROR\"))\n {\n msg.mtype = MIRROR;\n strcpy(msg.mtext, arguments[1]);\n msgsnd(queue, &msg, MAX_MSG_LENGTH, 0);\n\n while(!msgrcv(info.queue, &msg, MAX_MSG_LENGTH, 0, MSG_NOERROR));\n printf(\"%s\\n\", msg.mtext);\n }\n }\n}\n\n\nvoid sender(int argc, char** argv)\n{\n struct queueInfo info = init();\n key_t key = connect(info);\n\n if (argc == 1)\n {\n sendMessages(stdin, key, info);\n }\n else\n {\n FILE* file = fopen(argv[1], \"r\");\n sendMessages(file, key, info);\n }\n}\n\n\nint main(int argc, char** argv)\n{\n sender(argc, argv);\n}\n\n\n" }, { "alpha_fraction": 0.4971528649330139, "alphanum_fraction": 0.5168637633323669, "avg_line_length": 20.84688949584961, "blob_id": "5937ded293141d67281b21ed5cf6db9d99925bbe", "content_id": "e45141661992d4b76a197ed4ee818c56af6173b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4566, "license_type": "permissive", "max_line_length": 180, "num_lines": 209, "path": "/SysOpy/lab8/src/zad1/main.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <memory.h>\n#include <pthread.h>\n#include <math.h>\n\n\nint** INPUT;\nint** OUTPUT;\ndouble** FILTER;\nint WIDTH;\nint HEIGHT;\nint NUM_OF_THREADS;\nint FILTER_SIZE;\nint* FINISHED;\n\n\nint** parseImage(char* fileName)\n{\n FILE* file = fopen(fileName, \"r\");\n\n char* buf = calloc(10, sizeof(char));\n fgets(buf, 10, file);\n\n fgets(buf, 10, file);\n WIDTH = atoi(strtok(buf, \" \"));\n HEIGHT = atoi(strtok(NULL, \"\\n\\0\"));\n\n fgets(buf, 10, file);\n free(buf);\n\n int** result;\n result = calloc(HEIGHT, sizeof(int*));\n for (int i=0; i<HEIGHT; i++)\n result[i] = calloc(WIDTH, sizeof(int));\n\n char* line = calloc(128, sizeof(char));\n int i = 0;\n while(fgets(line, 128, file))\n {\n result[i/WIDTH][i%WIDTH] = atoi(strtok(line, \" \\n\"));\n i++;\n char* buf;\n while((buf = strtok(NULL, \" \\n\")) != NULL)\n {\n result[i/WIDTH][i%WIDTH] = atoi(buf);\n i++;\n }\n }\n\n fclose(file);\n free(line);\n\n return result;\n}\n\n\ndouble** parseFilter(char* fileName)\n{\n FILE* file = fopen(fileName, \"r\");\n\n char* buf = calloc(10, sizeof(char));\n fgets(buf, 10, file);\n FILTER_SIZE = atoi(buf);\n\n double** result;\n result = calloc(FILTER_SIZE, sizeof(double*));\n for (int i=0; i<FILTER_SIZE; i++)\n result[i] = calloc(FILTER_SIZE, sizeof(double));\n\n char* line = calloc(128, sizeof(char));\n char* lineCpy = calloc(128, sizeof(char));\n int i = 0;\n while(fgets(line, 128, file) != NULL && i<FILTER_SIZE*FILTER_SIZE)\n {\n\tstrcpy(lineCpy, line);\n sscanf(strtok(lineCpy, \" \\n\"), \"%lf\", &result[i/FILTER_SIZE][i%FILTER_SIZE]);\n i++;\n char* buf;\n while((buf = strtok(NULL, \" \\n\")) != NULL)\n {\n sscanf(buf, \"%lf\", &result[i/FILTER_SIZE][i%FILTER_SIZE]);\n i++;\n }\n }\n\n fclose(file);\n free(line);\n\n return result;\n}\n\n\nvoid* calculate(void* segment)\n{\n int c = ceil(FILTER_SIZE*1.0/2);\n int maxWidth = (*(int*)segment + 1 != NUM_OF_THREADS)?(((*(int*)segment)+1) * WIDTH/NUM_OF_THREADS):WIDTH;\n\n for (int x=1; x<=HEIGHT; x++)\n {\n for (int y=(*(int*)(segment) * WIDTH/NUM_OF_THREADS)+1; y<=maxWidth; y++)\n {\n double result = 0;\n for (int i=1; i<=FILTER_SIZE; i++)\n {\n for (int j=1; j<=FILTER_SIZE; j++)\n {\n result += (((1>=x-c+i)?1:(x-c+i))-1 < HEIGHT && ((1>=y-c+j)?1:(y-c+j))-1 < WIDTH)?INPUT[((1>=x-c+i)?1:(x-c+i))-1][((1>=y-c+j)?1:(y-c+j))-1]*FILTER[i-1][j-1]:0;\n }\n }\n OUTPUT[x-1][y-1] = 
round(result);\n }\n }\n\n FINISHED[*(int*)segment] = 1;\n\n return 0;\n}\n\n\nvoid doSomeWork()\n{\n OUTPUT = calloc(HEIGHT, sizeof(int*));\n for (int i=0; i<HEIGHT; i++)\n OUTPUT[i] = calloc(WIDTH, sizeof(int));\n\n pthread_t* threads = calloc(NUM_OF_THREADS, sizeof(pthread_t));\n\n struct timespec st, en;\n\n clock_gettime(CLOCK_REALTIME, &st);\n\n int** ints = calloc(NUM_OF_THREADS, sizeof(int*));\n for (int i=0; i<NUM_OF_THREADS; i++)\n {\n ints[i] = calloc(1, sizeof(int));\n *ints[i] = i;\n }\n\n for (int i=0; i<NUM_OF_THREADS; i++)\n {\n pthread_create(&threads[i], NULL, calculate, ints[i]);\n }\n\n while(1)\n {\n int flag = 1;\n for (int i=0; i<NUM_OF_THREADS; i++)\n if (FINISHED[i] == 0) flag = 0;\n if (flag == 1)\n break;\n }\n\n clock_gettime(CLOCK_REALTIME, &en);\n\n free(threads);\n free(ints);\n printf(\"%lf \\n\", (en.tv_sec + en.tv_nsec*1e-9 -st.tv_sec - st.tv_nsec*1e-9));\n}\n\n\nvoid writeToFile(char* fileName)\n{\n FILE *output = fopen(fileName, \"w\");\n\n fprintf(output, \"P2\\n\");\n fprintf(output, \"%d %d\\n\",WIDTH, HEIGHT);\n fprintf(output, \"255\\n\");\n\n for (int i=0; i<HEIGHT; i++)\n {\n for (int j=0; j<WIDTH; j++)\n {\n fprintf(output, \"%d \", OUTPUT[i][j]);\n }\n fprintf(output, \"\\n\");\n }\n\n fclose(output);\n}\n\n\nint main(int argc, char** argv)\n{\n if(argc != 5) return -1;\n NUM_OF_THREADS = atoi(argv[1]);\n char* inputImage = argv[2];\n char* filterFile = argv[3];\n char* outputImage = argv[4];\n\n INPUT = parseImage(inputImage);\n\n FILTER = parseFilter(filterFile);\n\n FINISHED = calloc(NUM_OF_THREADS, sizeof(int));\n\n for (int i=0; i<NUM_OF_THREADS; i++)\n FINISHED[i] = 0;\n\n doSomeWork();\n\n writeToFile(outputImage);\n\n free(INPUT);\n free(OUTPUT);\n free(FILTER);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.5681818127632141, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 6, "blob_id": "b850fc43e5218fe553466f18c1caeabf085151ff", "content_id": "ed1643919e16bc8f673343bc32689d182779c41e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 44, "license_type": "permissive", "max_line_length": 17, "num_lines": 6, "path": "/SysOpy/lab3/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n\nall:\n\tgcc -Wall zad1.c\n\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.408231645822525, "alphanum_fraction": 0.42474275827407837, "avg_line_length": 19.586206436157227, "blob_id": "5845170cb7f046c6fb947d019a2be4e07ead8745", "content_id": "694fadc901328c1a4d35f74e1ae1ac2626991b43", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4179, "license_type": "permissive", "max_line_length": 78, "num_lines": 203, "path": "/SysOpy/lab4/src/zad3/zad3.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <unistd.h>\n#include <stdlib.h>\n#include <signal.h>\n#include <sys/wait.h>\n\n\npid_t CHILD;\nconst int SIGRT1 = 1;\nconst int SIGRT2 = 2;\n\nint SENT = 0;\nint RECEIVED_CHILD = 0;\nint RECEIVED_PARENT = 0;\n\n\n\nvoid signalHandling(int sig)\n{\n switch (sig)\n {\n case SIGINT:\n {\n printf(\"Terminated\\n\");\n kill(CHILD, SIGUSR2);\n raise(SIGKILL);\n\n break;\n }\n\n case SIGUSR1:\n {\n if (CHILD > 0)\n {\n RECEIVED_PARENT++;\n }\n else\n {\n RECEIVED_CHILD ++;\n kill(getppid(), SIGUSR1);\n }\n\n break;\n }\n\n case SIGUSR2:\n {\n printf(\"Recieved_Child = %d\\n\", RECEIVED_CHILD);\n raise(SIGKILL);\n\n break;\n }\n\n default:\n {\n if (sig != 
SIGRTMIN+SIGRT1 && sig != SIGRTMIN+SIGRT2)\n {\n printf(\"Unknown signal\\n\");\n exit(-1);\n }\n\n if (sig == SIGRTMIN+SIGRT1)\n {\n if (CHILD > 0)\n {\n RECEIVED_PARENT++;\n }\n else\n {\n RECEIVED_CHILD++;\n kill(getppid(), SIGRTMIN+SIGRT1);\n }\n }\n else\n {\n if (CHILD > 0)\n {\n RECEIVED_PARENT++;\n }\n else\n {\n RECEIVED_CHILD++;\n kill(getppid(), SIGRTMIN+SIGRT2);\n }\n }\n\n break;\n }\n }\n}\n\n\n\nvoid licenceToKill(int numberOfSignals, int type)\n{\n sigset_t fullMask;\n sigfillset(&fullMask);\n\n struct sigaction act;\n act.sa_handler = signalHandling;\n act.sa_mask = fullMask;\n act.sa_flags = 0;\n\n if ((CHILD = fork()) < 0)\n {\n printf(\"Fork error\\n\");\n exit(-1);\n }\n else if (CHILD == 0)\n {\n if (type < 3) sigaction(SIGUSR1, &act, NULL);\n sigaction(SIGUSR2, &act, NULL);\n if (type == 3)\n {\n sigaction(SIGRTMIN + SIGRT1, &act, NULL);\n sigaction(SIGRTMIN + SIGRT2, &act, NULL);\n }\n\n sigset_t mask;\n sigfillset(&mask);\n if (type < 3)sigdelset(&mask, SIGUSR1);\n sigdelset(&mask, SIGUSR2);\n if (type == 3)\n {\n sigdelset(&mask, SIGRTMIN + SIGRT1);\n sigdelset(&mask, SIGRTMIN + SIGRT2);\n }\n\n if (sigprocmask(SIG_SETMASK, &mask, NULL) < 0)\n {\n printf(\"Mask error\");\n exit(-1);\n }\n\n while(1)\n {\n pause();\n }\n }\n else\n {\n sigdelset(&act.sa_mask, SIGINT);\n sigaction(SIGUSR1, &act, NULL);\n sigaction(SIGINT, &act, NULL);\n if (type == 3)\n {\n sigaction(SIGRTMIN + SIGRT1, &act, NULL);\n sigaction(SIGRTMIN + SIGRT2, &act, NULL);\n }\n\n sleep(1); //child must prepare for signals\n\n if (type < 3)\n {\n for (int i = 0; i < numberOfSignals; i++)\n {\n SENT++;\n kill(CHILD, SIGUSR1);\n if (type ==2 ) pause();\n }\n }\n else\n {\n kill(CHILD, SIGRTMIN+SIGRT1);\n kill(CHILD, SIGRTMIN+SIGRT2);\n SENT = 2;\n sleep(1); // to not interrupt signal processing with SIGKILL\n }\n\n kill(CHILD, SIGUSR2);\n waitpid(CHILD, NULL, 0);\n }\n}\n\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 3) return -1;\n int numberOfSignals;\n int type;\n if (sscanf(argv[1], \"%d\", &numberOfSignals) < 1) return -1;\n if (sscanf(argv[2], \"%d\", &type) < 1) return -1;\n if (type < 1 || type > 3) return -1;\n licenceToKill(numberOfSignals, type);\n\n printf(\"Sent: %d\\nRecieved_Parent: %d\\n\", SENT, RECEIVED_PARENT);\n\n return 0;\n}\n\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0)\n {\n printf(\"Error\");\n return -1;\n }\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6959459185600281, "alphanum_fraction": 0.6959459185600281, "avg_line_length": 15.333333015441895, "blob_id": "8c9cb82ad39688e0cd7610e5d3e96a512741156f", "content_id": "52a2dbfa3fb340075d44bc4636ffd4ae8f148e97", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 148, "license_type": "permissive", "max_line_length": 39, "num_lines": 9, "path": "/SysOpy/lab10/src/zad2/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: server.c client.c\n\trm -f test\n\tgcc -Wall server.c -o server -lpthread\n\tgcc -Wall client.c -o client -lpthread\n\n\nclean: \n\trm client\n\trm server\n\n" }, { "alpha_fraction": 0.4767441749572754, "alphanum_fraction": 0.5310077667236328, "avg_line_length": 11.899999618530273, "blob_id": "146d9ec25d67cea809b536dd0070d16a20aaad72", "content_id": "42c029f4fd23bbf892a0f84ed90395b3fa8edaf7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 258, "license_type": "permissive", "max_line_length": 46, "num_lines": 20, "path": 
"/SysOpy/lab3/src/zad3b/zad3b.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <string.h>\n\nint main(int argc, char** argv)\n{\n\tif (!strcmp(argv[1], \"0\"))\n\t{\n\t\twhile(1){}\n\t}\n\telse\n\t{\n\t\twhile(1)\n\t\t{\n\t\t\tdouble* buf = calloc(1000, sizeof(double));\n\t\t\tfor (int i=0; i<1000; i++)\n\t\t\tbuf[i] = i;\n\t\t}\n\t}\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.46835029125213623, "alphanum_fraction": 0.48671695590019226, "avg_line_length": 23.780487060546875, "blob_id": "d0fb193ac66d13299a0ef786b60fe0165c816ad6", "content_id": "57ac36bdf5994d6d6065b24a87884de5a0c51b72", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3049, "license_type": "permissive", "max_line_length": 95, "num_lines": 123, "path": "/SysOpy/lab3/src/zad3a/zad3a.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n#include <stdio.h>\n#include <stdlib.h>\n#include <memory.h>\n#include <libgen.h>\n#include <unistd.h>\n#include <sys/wait.h>\n#include <sys/resource.h>\n\n\nvoid readAndExecute(char* fileName, int time, int memory)\n{\n FILE* file = fopen(fileName, \"r\");\n if (file == NULL)\n {\n printf(\"File opening error\");\n exit(-1);\n }\n\n size_t bufSize = 100;\n char* buffer = calloc(bufSize, sizeof(char));\n struct rlimit limits;\n\n while (getline(&buffer, &bufSize, file) != -1)\n {\n\n char* bufferCopy = calloc(bufSize, sizeof(char));\n strcpy(bufferCopy, buffer);\n\n char** arguments = NULL;\n char* p = strtok(bufferCopy, \" \");\n int n_spaces = 0;\n\n while(p)\n {\n arguments = realloc(arguments, sizeof(char*)* ++n_spaces);\n if (arguments == NULL) exit(-1);\n\n arguments[n_spaces-1] = p;\n p = strtok(NULL, \" \");\n }\n\n arguments = realloc(arguments, sizeof(char*) * (n_spaces + 1));\n arguments[n_spaces] = 0;\n\n for (int i=0; i<=n_spaces; i++)\n strtok(arguments[i], \"\\n\");\n\n int result = 0;\n pid_t child = vfork();\n\n if (child < 0) exit(-1);\n else if (child == 0)\n {\n limits.rlim_max = (rlim_t)time;\n limits.rlim_cur = (rlim_t)time;//RLIM_INFINITY;\n setrlimit(RLIMIT_CPU, &limits);\n\n limits.rlim_max = (rlim_t)memory * 1048576;\n limits.rlim_cur = (rlim_t)memory * 1048576;//LIM_INFINITY;\n setrlimit(RLIMIT_AS, &limits);\n\n\n if (execvp(arguments[0], arguments) == -1)\n {\n if (execv(basename(arguments[0]), arguments) == -1)\n {\n exit(-1);\n }\n }\n exit(0);\n }\n else\n {\n struct rusage stats;\n wait3(&result, 0, &stats);\n if (WIFEXITED(result) && WEXITSTATUS(result) != 0)\n {\n printf(\"!!!Operation terminated!!!\");\n exit(-1);\n }\n else\n {\n if (WIFSIGNALED(result) != 0) printf(\"Killed by signal %d\\n\",WTERMSIG(result));\n else\n {\n printf(\"Resources:\\tUser time: %f\\tSystem time: %f\\n\\n\",\n stats.ru_utime.tv_usec * 1.0 / 1e6 + stats.ru_utime.tv_sec,\n stats.ru_stime.tv_usec * 1.0 / 1e6 + stats.ru_stime.tv_sec);\n }\n }\n }\n\n free(bufferCopy);\n free(arguments);\n }\n\n free(buffer);\n fclose(file);\n}\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 4) return -1;\n char* fileName = argv[1];\n int time = atoi(argv[2]);\n int memory = atoi(argv[3]);\n readAndExecute(fileName, time, memory);\n return 0;\n}\n\n\nvoid printHelp()\n{\n printf(\"Possible arguments:\\n\\tfilePath timeLimit[s] memoryLimit[MB]\");\n}\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0) printHelp();\n return 0;\n}\n" }, { "alpha_fraction": 0.4725050926208496, "alphanum_fraction": 0.4821792244911194, "avg_line_length": 22.10588264465332, 
"blob_id": "41f02e369dc9870121a9bb7a4653370f9d4f6d78", "content_id": "a6a45cc23b3fb986d88736ccb308ee67106afd15", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3928, "license_type": "permissive", "max_line_length": 99, "num_lines": 170, "path": "/SysOpy/lab7/src/zad2/client.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <sys/param.h>\n#include <sys/sem.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <sys/msg.h>\n#include <sys/wait.h>\n#include <unistd.h>\n#include <time.h>\n#include <sys/mman.h>\n#include <fcntl.h>\n#include <semaphore.h>\n\n\nenum status\n{\n SLEEP,\n WAKES,\n SITS,\n SITS_FIRST,\n START,\n END,\n LEAVES,\n INVITES\n};\n\n\nstruct data\n{\n int waitingClients;\n int queueLength;\n enum status status;\n int clientPid;\n int barberPid;\n};\n\n\nstruct data* cMemPtr;\nsem_t* sem;\nint queue;\nint commonMemory;\n\n\nlong getTime()\n{\n struct timespec times;\n\n clock_gettime(CLOCK_MONOTONIC, &times);\n return times.tv_nsec;\n}\n\n\npid_t client;\n\nvoid clientLogic(int numberOfShaves)\n{\n struct sembuf* sops = calloc(1, sizeof(struct sembuf));\n sops[0].sem_flg = 0;\n\n struct msgbuf\n {\n long mtype;\n pid_t pid;\n char mtext[1];\n };\n\n struct msgbuf msg;\n msg.mtype = 1;\n\n msg.pid = getpid();\n\n\n for (int i=0; i<numberOfShaves; i++)\n {\n while(1)\n {\n usleep(1000);\n sem_wait(sem);\n\n\n\n if (cMemPtr->status == SLEEP && cMemPtr->clientPid == 0 && client == 0)\n {\n cMemPtr->clientPid = getpid();\n cMemPtr->status = WAKES;\n printf(\"Client wakes the barber up\\t%d\\t%ld\\n\", getpid(), getTime());\n sem_post(sem);\n continue;\n }\n else if (cMemPtr->status == SITS_FIRST && cMemPtr->clientPid == getpid())\n {\n cMemPtr->status = START;\n client = getpid();\n printf(\"Client sits on the chair\\t%d\\t%ld\\n\", getpid(), getTime());\n sem_post(sem);\n continue;\n }\n else if (cMemPtr->status == SITS && cMemPtr->clientPid == getpid())\n {\n cMemPtr->status = START;\n cMemPtr->waitingClients--;\n printf(\"Client sits on the chair\\t%d\\t%ld\\n\", getpid(), getTime());\n sem_post(sem);\n continue;\n }\n else if (cMemPtr->status == LEAVES && cMemPtr->clientPid == getpid())\n {\n cMemPtr->status = INVITES;\n cMemPtr->clientPid = 0;\n client = 0;\n printf(\"Client leaves\\t\\t\\t%d\\t%ld\\n\", getpid(), getTime());\n sem_post(sem);\n break;\n }\n else if (client == 0 && cMemPtr->clientPid != 0)\n {\n if (cMemPtr->queueLength <= cMemPtr->waitingClients)\n {\n printf(\"Client leaves due to the full queue\\t%d\\t%ld\\n\", getpid(), getTime());\n sem_post(sem);\n break;\n } else {\n client = getpid();\n msg.pid = getpid();\n cMemPtr->waitingClients++;\n msgsnd(queue, &msg, 10, 0);\n printf(\"Client sits in the waiting room\\t%d\\t%ld\\n\", getpid(), getTime());\n }\n sem_post(sem);\n continue;\n }\n else\n {\n sem_post(sem);\n }\n }\n }\n exit(0);\n}\n\n\nvoid makeChildren(int numOfClients, int numOfShaves)\n{\n client = 0;\n\n for (int i=0; i<numOfClients; i++)\n {\n if (fork() == 0)\n {\n clientLogic(numOfShaves);\n }\n }\n\n while (wait(NULL) > 0);\n}\n\n\nint main(int argc, char** argv)\n{\n commonMemory = shm_open(\"mem\", O_RDWR, 0622);\n ftruncate(commonMemory, sizeof(struct data));\n cMemPtr = mmap(NULL, sizeof(struct data), PROT_READ | PROT_WRITE, MAP_SHARED, commonMemory, 0);\n\n key_t key = ftok(getenv(\"HOME\"), 'c');\n queue = msgget(key, 0);\n// printf(\"%d\\n\", queue);\n\n sem = sem_open(\"sem\", O_RDWR, 0622, 1);\n\n makeChildren(atoi(argv[1]), 
atoi(argv[2]));\n}\n" }, { "alpha_fraction": 0.5024430155754089, "alphanum_fraction": 0.5225298404693604, "avg_line_length": 21.5950927734375, "blob_id": "b3504162eb7924240cafc115d22d3d897882c216", "content_id": "11cc0db597a8f9faa9563e14d812751b425989d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3684, "license_type": "permissive", "max_line_length": 74, "num_lines": 163, "path": "/SysOpy/lab10/src/zad2/client.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <sys/socket.h>\n#include <string.h>\n#include <stdio.h>\n#include <unistd.h>\n#include <netinet/in.h>\n#include <arpa/inet.h>\n#include <stdlib.h>\n#include <sys/un.h>\n#include <stddef.h>\n\n\nint calculate(char* e)\n{\n int arg1 = atoi(strtok(e, \" \"));\n char* op = strtok(NULL, \" \");\n int arg2 = atoi(strtok(NULL, \" \\n\"));\n\n switch(op[0])\n {\n case '+': return arg1 + arg2;\n case '-': return arg1 - arg2;\n case '*': return arg1 * arg2;\n case '/': return arg1 / arg2;\n default: return -1;\n }\n}\n\n\nvoid receive(int fd, char* name)\n{\n char recvBuff[128];\n\n char* msg = calloc(128, sizeof(char));\n strcpy(msg, \"1\");\n strcat(msg, name);\n write(fd, msg, strlen(msg));\n free(msg);\n\n// perror(\"write\");\n\n read(fd, recvBuff, sizeof(recvBuff));\n\n// perror(\"read\");\n\n if (recvBuff[0] == '1' && (recvBuff[1] == 'N' || recvBuff[1] == 'Y'))\n {\n if (recvBuff[1] == 'N')\n {\n printf(\"Given name already exists in cluster\\n\");\n return;\n }\n else\n {\n printf(\"Connected to cluster\\n\");\n }\n }\n else\n {\n printf(\"%s\\n\", recvBuff);\n printf(\"Unknown message\\n\");\n return;\n }\n\n while (1)\n {\n // perror(\"start\");\n read(fd, recvBuff, sizeof(recvBuff));\n // perror(\"rec\");\n if (recvBuff[0] == '3')\n {\n perror(\"calc\");\n char* result = calloc(10, sizeof(char));\n int p = (int)recvBuff[1];\n printf(\"Calc request: %s\", recvBuff + 2 * sizeof(char));\n sprintf(result, \"%d\", calculate(recvBuff + 2 * sizeof(char)));\n\n char* msg = calloc(128, sizeof(char));\n char pos[] = {(char)p, '\\0'};\n strcpy(msg, \"3\");\n strcat(msg, pos);\n strcat(msg, result);\n\n write(fd, msg, 128);\n free(msg);\n }\n else if (recvBuff[0] == '2')\n {\n// printf(\"pong\\n\");\n write(fd, \"2\", 1);\n }\n else if (recvBuff[0] == '5')\n {\n write(fd, recvBuff + sizeof(char), 128);\n }\n recvBuff[0] = '0';\n\n usleep(1000);\n }\n}\n\n\nvoid initNet(char* address, char* name)\n{\n char* addr = strtok(address, \":\");\n int port = atoi(strtok(NULL, \"\\0\\n\"));\n int sockfd = 0;\n struct sockaddr_in serv_addr;\n\n sockfd = socket(AF_INET, SOCK_DGRAM, 0);\n\n int option = 1;\n setsockopt(sockfd, SOL_SOCKET, SO_PASSCRED, &option, sizeof(option));\n\n// perror(\"socket\");\n\n serv_addr.sin_family = AF_INET;\n serv_addr.sin_port = htons(5000);\n inet_pton(AF_INET, addr, &serv_addr.sin_addr);\n\n // bind(sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));\n// perror(\"bind\");\n connect(sockfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr));\n\n\n receive(sockfd, name);\n}\n\n\nvoid initLocal(char* path, char* name)\n{\n int sockfd = 0;\n struct sockaddr_un serv_addr;\n\n sockfd = socket(AF_UNIX, SOCK_DGRAM, 0);\n int option = 1;\n setsockopt(sockfd, SOL_SOCKET, SO_PASSCRED, &option, sizeof(option));\n// perror(\"socket\");\n\n\n strcpy(serv_addr.sun_path, path);\n serv_addr.sun_family = AF_UNIX;\n\n bind(sockfd, (struct sockaddr*)&serv_addr, sizeof(sa_family_t));\n// perror(\"bind\");\n\n connect(sockfd, (struct sockaddr 
*)&serv_addr, sizeof(serv_addr));\n\n// perror(\"connect\");\n\n receive(sockfd, name);\n}\n\nint main(int argc, char** argv)\n{\n if (argc != 4) return -1;\n char* name = argv[1];\n int mode = atoi(argv[2]);\n char* address = argv[3];\n\n if (mode == 0) initNet(address, name);\n if (mode == 1) initLocal(address, name);\n return 0;\n}\n\n" }, { "alpha_fraction": 0.46596357226371765, "alphanum_fraction": 0.4803451597690582, "avg_line_length": 20.070707321166992, "blob_id": "0fde23cd16f89b55056babb0c0ea4ee757b31943", "content_id": "0f10dac5c9c7f62674430866f66ee8b07ffaadf3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2086, "license_type": "permissive", "max_line_length": 83, "num_lines": 99, "path": "/SysOpy/lab3/src/zad2/zad2.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <memory.h>\n#include <libgen.h>\n#include <unistd.h>\n#include <sys/wait.h>\n\n\nvoid readAndExecute(char* fileName)\n{\n FILE* file = fopen(fileName, \"r\");\n if (file == NULL)\n {\n printf(\"File opening error\");\n exit(-1);\n }\n\n size_t bufSize = 100;\n char* buffer = calloc(bufSize, sizeof(char));\n\n while (getline(&buffer, &bufSize, file) != -1)\n {\n\n char* bufferCopy = calloc(bufSize, sizeof(char));\n strcpy(bufferCopy, buffer);\n\n char** arguments = NULL;\n char* p = strtok(bufferCopy, \" \");\n int n_spaces = 0;\n\n while(p)\n {\n arguments = realloc(arguments, sizeof(char*)* ++n_spaces);\n if (arguments == NULL) exit(-1);\n\n arguments[n_spaces-1] = p;\n p = strtok(NULL, \" \");\n }\n\n arguments = realloc(arguments, sizeof(char*) * (n_spaces + 1));\n arguments[n_spaces] = 0;\n\n for (int i=0; i<=n_spaces; i++)\n strtok(arguments[i], \"\\n\");\n\n int result = 0;\n pid_t child = vfork();\n\n if (child < 0) exit(-1);\n else if (child == 0)\n {\n if (execv(arguments[0], arguments) == -1)\n {\n if (execvp(basename(arguments[0]), arguments) == -1)\n {\n exit(-1);\n }\n }\n exit(0);\n }\n else\n {\n wait(&result);\n if (WIFEXITED(result) && WEXITSTATUS(result) != 0)\n {\n printf(\"!!!Operation terminated!!!\\nError in: %s\\n\", arguments[0]);\n exit(-1);\n }\n }\n\n free(bufferCopy);\n free(arguments);\n }\n\n free(buffer);\n fclose(file);\n}\n\n\nint parse(int argc, char** argv)\n{\n if (argc != 2) return -1;\n char* fileName = argv[1];\n readAndExecute(fileName);\n return 0;\n}\n\n\nvoid printHelp()\n{\n printf(\"Possible arguments:\\n\\tfilePath\\n\");\n}\n\n\nint main(int argc, char** argv)\n{\n if (parse(argc, argv) != 0) printHelp();\n return 0;\n}\n" }, { "alpha_fraction": 0.6041666865348816, "alphanum_fraction": 0.6458333134651184, "avg_line_length": 8.600000381469727, "blob_id": "23bb92c57bb99a7051943a6516705c19034a8bd0", "content_id": "7052c2e153264004469955ac65d2a69f4e79633c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 48, "license_type": "permissive", "max_line_length": 17, "num_lines": 5, "path": "/SysOpy/lab2/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: zad1.c\n\tgcc -Wall zad1.c\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.5454333424568176, "alphanum_fraction": 0.5573159456253052, "avg_line_length": 20.34328269958496, "blob_id": "a771f6342f633dce88dffccbf02680aa7b6fa38f", "content_id": "dd9df0202c4515f21636224b5e4173b0d0b5997a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 
4292, "license_type": "permissive", "max_line_length": 101, "num_lines": 201, "path": "/SysOpy/lab6/src/zad2/client.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\n#include <sys/types.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <time.h>\n#include <string.h>\n#include <mqueue.h>\n#include <unistd.h>\n\n\n#include \"properties.h\"\n\n\n\nstruct queueInfo\n{\n char* name;\n mqd_t queue;\n};\n\n\n\nchar* getRandomName()\n{\n int length = rand()%10 + 5;\n char* word = calloc(length + 2, sizeof(char));\n\n for (int i=1; i<length; i++)\n {\n word[i] = (char) (rand() % ('z' - 'a') + 'a');\n }\n\n word[length] = '\\0';\n word[0] = '/';\n\n return word;\n}\n\n\n\nstruct queueInfo init()\n{\n srand(time(0));\n\n char* name = getRandomName();\n struct mq_attr stats;\n stats.mq_msgsize = MAX_MSG_LENGTH;\n stats.mq_flags = 0;\n stats.mq_curmsgs = 0;\n stats.mq_maxmsg = 10;\n\n mqd_t queue = mq_open(name, O_RDWR | O_CREAT, 0666, &stats);\n\n struct queueInfo info = {name, queue};\n\n return info;\n}\n\n\n\nchar* prepareMessage(int mode, char* queueName, char* arg)\n{\n char* senderBuf = calloc(MAX_MSG_LENGTH, sizeof(char));\n char* modeString = calloc(2, sizeof(char));\n\n snprintf(modeString, 2, \"%d\", mode);\n strcpy(senderBuf, modeString);\n strcat(senderBuf, \"|\");\n strcat(senderBuf, queueName);\n strcat(senderBuf, \"|\");\n strcat(senderBuf, arg);\n\n free(modeString);\n return senderBuf;\n}\n\n\n\nmqd_t connect(struct queueInfo info)\n{\n struct mq_attr stats;\n stats.mq_msgsize = MAX_MSG_LENGTH;\n stats.mq_flags = 0;\n stats.mq_curmsgs = 0;\n stats.mq_maxmsg = 10;\n\n mqd_t serverQueue = mq_open(SERVER, O_WRONLY, 0666, &stats);\n char buf[MAX_MSG_LENGTH];\n\n mq_send(serverQueue, prepareMessage(INIT, info.name, \"\"), MAX_MSG_LENGTH, 3);\n\n while (!mq_receive(info.queue, buf, MAX_MSG_LENGTH, NULL));\n printf(\"Connected!\\n\");\n\n return serverQueue;\n}\n\n\n\nvoid sendMessages(FILE* fd, mqd_t serverQueue, struct queueInfo info)\n{\n size_t length = MAX_MSG_LENGTH;\n char* arguments[2];\n\n char queue[20];\n snprintf(queue, 20, \"%s\", info.name);\n\n while(1)\n {\n char* buffer = calloc(MAX_MSG_LENGTH, sizeof(char));\n getline(&buffer, &length, fd);\n\n char* bufferCopy = calloc(MAX_MSG_LENGTH, sizeof(char));\n strcpy(bufferCopy, buffer);\n\n arguments[0] = strtok(bufferCopy, \" \\n\");\n arguments[1] = strtok(NULL, \"\\n\");\n\n if (arguments[0] == NULL) continue;\n\n\n if (!strcmp(arguments[0], \"TIME\"))\n {\n mq_send(serverQueue, prepareMessage(TIME, info.name, \"\"), MAX_MSG_LENGTH, 3);\n\n while (!mq_receive(info.queue, bufferCopy, MAX_MSG_LENGTH, NULL));\n\n strtok(bufferCopy, \"|\");\n strtok(NULL, \"|\");\n printf(\"%s\\n\", strtok(NULL, \"\\n\"));\n }\n\n else if (!strcmp(arguments[0], \"END\"))\n {\n mq_send(serverQueue, prepareMessage(END, info.name, \"\"), MAX_MSG_LENGTH, 3);\n\n usleep(100);\n\n mq_close(info.queue);\n mq_unlink(info.name);\n\n break;\n }\n\n else if (!strcmp(arguments[0], \"CALC\"))\n {\n char* senderBuf = calloc(MAX_MSG_LENGTH, sizeof(char));\n\n mq_send(serverQueue, prepareMessage(CALC, info.name, arguments[1]), MAX_MSG_LENGTH, 3);\n\n while (!mq_receive(info.queue, senderBuf, MAX_MSG_LENGTH, NULL));\n\n strtok(senderBuf, \"|\");\n strtok(NULL, \"|\");\n printf(\"%s\\n\", strtok(NULL, \"\\n\"));\n\n free(senderBuf);\n\n }\n\n else if (!strcmp(arguments[0], \"MIRROR\"))\n {\n char* senderBuf = calloc(MAX_MSG_LENGTH, sizeof(char));\n\n mq_send(serverQueue, prepareMessage(MIRROR, info.name, arguments[1]), MAX_MSG_LENGTH, 
3);\n\n while (!mq_receive(info.queue, senderBuf, MAX_MSG_LENGTH, NULL));\n\n strtok(senderBuf, \"|\");\n strtok(NULL, \"|\");\n printf(\"%s\\n\", strtok(NULL, \"\\n\"));\n\n free(senderBuf);\n }\n\n free(buffer);\n free(bufferCopy);\n }\n}\n\n\nvoid sender(int argc, char** argv)\n{\n struct queueInfo info = init();\n mqd_t serverQueue = connect(info);\n\n if (argc == 1)\n {\n sendMessages(stdin, serverQueue, info);\n }\n else\n {\n FILE* file = fopen(argv[1], \"r\");\n sendMessages(file, serverQueue, info);\n }\n}\n\n\nint main(int argc, char** argv)\n{\n sender(argc, argv);\n}\n\n" }, { "alpha_fraction": 0.5918367505073547, "alphanum_fraction": 0.6326530575752258, "avg_line_length": 7.166666507720947, "blob_id": "c6cf4a243338a674542d73d90e77c07897f08ccc", "content_id": "c69a88d643460078959e3742ce8b057573cb49e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 49, "license_type": "permissive", "max_line_length": 17, "num_lines": 6, "path": "/SysOpy/lab4/src/zad3/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "all: zad3.c\n\tgcc -Wall zad3.c\n\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.5799999833106995, "alphanum_fraction": 0.6200000047683716, "avg_line_length": 7.166666507720947, "blob_id": "917e42bf9c21857184121ce6a78deae99d92ff34", "content_id": "0cb2d26fc5e0cfc113d18103ca1cf27545db672c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 50, "license_type": "permissive", "max_line_length": 17, "num_lines": 6, "path": "/SysOpy/lab4/src/zad1/Makefile", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "\nall: zad1.c\n\tgcc -Wall zad1.c\n\n\nclean:\n\trm a.out\n" }, { "alpha_fraction": 0.4567876160144806, "alphanum_fraction": 0.4696290194988251, "avg_line_length": 23.29207992553711, "blob_id": "9d5cca9d910a19096096d01316e520741b58a1b2", "content_id": "cc85c08e4720b0ac0133f1420f77bdfc988510e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4906, "license_type": "permissive", "max_line_length": 90, "num_lines": 202, "path": "/SysOpy/lab6/src/zad2/server.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <sys/types.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <time.h>\n#include <string.h>\n#include <unistd.h>\n#include <fcntl.h>\n#include <mqueue.h>\n\n#include \"properties.h\"\n\n\n\nmqd_t init()\n{\n struct mq_attr stats;\n stats.mq_msgsize = MAX_MSG_LENGTH;\n stats.mq_flags = 0;\n stats.mq_curmsgs = 0;\n stats.mq_maxmsg = 10;\n\n mqd_t queue = mq_open(SERVER, O_RDONLY | O_CREAT, 0666, &stats);\n\n return queue;\n}\n\n\n\nchar* getDate()\n{\n time_t now;\n time(&now);\n char* result = calloc(50, sizeof(char));\n snprintf(result, MAX_MSG_LENGTH, \"%s\", ctime(&now));\n return result;\n}\n\n\n\nchar* prepareMessage(int mode, char* arg)\n{\n char* senderBuf = calloc(MAX_MSG_LENGTH, sizeof(char));\n char* modeString = calloc(2, sizeof(char));\n\n snprintf(modeString, 2, \"%d\", mode);\n strcpy(senderBuf, modeString);\n strcat(senderBuf, \"|\");\n strcat(senderBuf, SERVER);\n strcat(senderBuf, \"|\");\n strcat(senderBuf, arg);\n\n free(modeString);\n return senderBuf;\n}\n\n\n\nvoid receive(mqd_t que)\n{\n mqd_t* clients = calloc(0, sizeof(mqd_t));\n int numberOfClients = 0;\n\n while (1)\n {\n char* buffer = calloc(MAX_MSG_LENGTH, sizeof(char));\n mq_receive(que, buffer, MAX_MSG_LENGTH, NULL);\n\n char 
bufferCopy[MAX_MSG_LENGTH];\n strcpy(bufferCopy, buffer);\n\n struct mq_attr stats;\n stats.mq_msgsize = MAX_MSG_LENGTH;\n stats.mq_flags = 0;\n stats.mq_curmsgs = 0;\n stats.mq_maxmsg = 10;\n\n strtok(bufferCopy, \"|\");\n char* clientQueueName = strtok(NULL, \"|\");\n\n mqd_t clientQueue = mq_open(clientQueueName, O_WRONLY, 0644, &stats);\n\n usleep(10000);\n\n char serverQueue[20];\n snprintf(serverQueue, 20, \"%d\", que);\n\n switch(buffer[0] - '1' + 1)\n {\n case INIT:\n {\n printf(\"init\\t%s\", getDate());\n\n clients = realloc(clients, sizeof(mqd_t)*(numberOfClients+1));\n clients[numberOfClients] = clientQueue;\n numberOfClients++;\n\n mq_send(clientQueue, prepareMessage(INIT, \"\"), MAX_MSG_LENGTH, 3);\n\n break;\n }\n\n case CALC:\n {\n printf(\"calc\\t%s\", getDate());\n\n int numbers[2];\n char bufferCopy[MAX_MSG_LENGTH];\n strcpy(bufferCopy, buffer);\n\n strtok(bufferCopy, \"|\");\n strtok(NULL, \"|\");\n char* expression = strtok(NULL, \"\\n\");\n\n char op = *(strpbrk(expression, \"+-/*\"));\n numbers[0] = atoi(strtok(expression, \" +-*/\\n\"));\n numbers[1] = atoi(strtok(NULL, \"\\n\"));\n\n double result;\n switch(op)\n {\n case '+': { result = numbers[0] + numbers[1]; break; }\n case '-': { result = numbers[0] - numbers[1]; break; }\n case '*': { result = numbers[0] * numbers[1]; break; }\n case '/': { result = (double)numbers[0] / (double)numbers[1]; break; }\n }\n\n char out[20];\n snprintf(out, MAX_MSG_LENGTH, \"%f\", result);\n\n mq_send(clientQueue, prepareMessage(CALC, out), MAX_MSG_LENGTH, 3);\n\n break;\n }\n\n case TIME:\n {\n printf(\"time\\t%s\", getDate());\n\n char out[40];\n snprintf(out, MAX_MSG_LENGTH, \"%s\", getDate());\n\n mq_send(clientQueue, prepareMessage(TIME, out), MAX_MSG_LENGTH, 3);\n\n break;\n }\n\n case END:\n {\n printf(\"end\\t%s\", getDate());\n\n for (int i=0; i<numberOfClients; i++)\n {\n mq_close(clients[i]);\n }\n\n mq_unlink(SERVER);\n\n return;\n }\n\n case MIRROR:\n {\n printf(\"mirror\\t%s\", getDate());\n\n char bufferCopy[MAX_MSG_LENGTH];\n strcpy(bufferCopy, buffer);\n strtok(bufferCopy, \"|\");\n strtok(NULL, \"|\");\n char* sentence = strtok(NULL, \"\\n\");\n char* result = calloc(strlen(sentence) + 1, sizeof(char));\n\n for (int i=strlen(sentence) - 1; i>=0; i--)\n {\n strcat(result, (char[2]) {sentence[i], '\\0'});\n }\n result[strlen(sentence)] = '\\0';\n\n mq_send(clientQueue, prepareMessage(MIRROR, result), MAX_MSG_LENGTH, 3);\n\n free(result);\n\n break;\n }\n\n default:\n {\n printf(\"Unknown msg type\\n\");\n exit(-1);\n }\n\n }\n free(buffer);\n }\n}\n\n\n\nint main()\n{\n mqd_t queue = init();\n receive(queue);\n}" }, { "alpha_fraction": 0.5014070272445679, "alphanum_fraction": 0.5198260545730591, "avg_line_length": 19.898395538330078, "blob_id": "d2d4544e836f7c8e82ff6a7ceed5aa0a12b883d4", "content_id": "29ad9f5d4cbee301cc7a783ac30a14fd56877d53", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3909, "license_type": "permissive", "max_line_length": 95, "num_lines": 187, "path": "/SysOpy/lab7/src/zad1/barber.c", "repo_name": "Kacpro/Semestr_4", "src_encoding": "UTF-8", "text": "#include <sys/param.h>\n#include <sys/sem.h>\n#include <sys/shm.h>\n#include <stdlib.h>\n#include <stdio.h>\n#include <sys/msg.h>\n#include <unistd.h>\n#include <time.h>\n\n\n\nenum status\n{\n SLEEP,\n WAKES,\n SITS,\n SITS_FIRST,\n START,\n END,\n LEAVES,\n INVITES\n};\n\nstruct data\n{\n int waitingClients;\n int queueLength;\n enum status status;\n pid_t clientPid;\n pid_t 
barberPid;\n};\n\n\n\nstruct data* cMemPtr;\nint semSet;\nint queue;\nint commonMemory;\n\n\n\nlong getTime()\n{\n struct timespec times;\n\n clock_gettime(CLOCK_MONOTONIC, &times);\n return times.tv_nsec;\n}\n\n\nvoid barberLogic()\n{\n struct sembuf* sops = calloc(1, sizeof(struct sembuf));\n sops[0].sem_flg = 0;\n\n struct msgbuf {\n long mtype;\n pid_t pid;\n char mtext[0];\n };\n\n struct msgbuf msg;\n msg.mtype = 1;\n\n perror(NULL);\n\n\n while(1)\n {\n sops[0].sem_num = 0;\n sops[0].sem_op = -1;\n semop(semSet, sops, 1);\n\n// perror(NULL);\n\n if (cMemPtr->status == WAKES && cMemPtr->barberPid == getpid())\n {\n cMemPtr->status = SITS_FIRST;\n printf(\"Barber wakes up\\t\\t\\t\\t%ld\\n\", getTime());\n\n sops[0].sem_num = 0;\n sops[0].sem_op = 1;\n semop(semSet, sops, 1);\n\n continue;\n }\n else if (cMemPtr->status == START && cMemPtr->barberPid == getpid())\n {\n cMemPtr->status = END;\n printf(\"Barber starts cutting\\t\\t%d\\t%ld\\n\", cMemPtr->clientPid, getTime());\n\n sops[0].sem_num = 0;\n sops[0].sem_op = 1;\n semop(semSet, sops, 1);\n\n continue;\n }\n else if (cMemPtr->status == END && cMemPtr->barberPid == getpid())\n {\n cMemPtr->status = LEAVES;\n printf(\"Barber ends cutting\\t\\t%d\\t%ld\\n\", cMemPtr->clientPid, getTime());\n\n sops[0].sem_num = 0;\n sops[0].sem_op = 1;\n semop(semSet, sops, 1);\n\n continue;\n }\n else if (cMemPtr->status == INVITES && cMemPtr->barberPid == getpid())\n {\n // usleep(1000);\n if (cMemPtr->waitingClients > 0)\n {\n msgrcv(queue, &msg, 10, 0, IPC_NOWAIT);\n cMemPtr->clientPid = msg.pid;\n cMemPtr->status = SITS;\n printf(\"Barber invites next client\\t%d\\t%ld\\n\", cMemPtr->clientPid, getTime());\n }\n else\n {\n cMemPtr->status = SLEEP;\n cMemPtr->clientPid = 0;\n printf(\"Barber falls asleep\\t\\t\\t%ld\\n\", getTime());\n }\n\n sops[0].sem_num = 0;\n sops[0].sem_op = 1;\n semop(semSet, sops, 1);\n\n continue;\n }\n else\n {\n\n sops[0].sem_num = 0;\n sops[0].sem_op = 1;\n semop(semSet, sops, 1);\n\n }\n\n }\n}\n\n\nvoid signalHandler()\n{\n msgctl(queue, IPC_RMID, NULL);\n\n shmdt(cMemPtr);\n shmctl(commonMemory, IPC_RMID, NULL);\n\n semctl(semSet, 0, IPC_RMID, NULL);\n\n exit(0);\n}\n\n\nint main(int argc, char** argv)\n{\n key_t key = ftok(getenv(\"HOME\"), 'c');\n\n struct data sharedData;\n sharedData.status = INVITES;\n sharedData.waitingClients = 0;\n sharedData.queueLength = atoi(argv[1]);\n sharedData.clientPid = 0;\n sharedData.barberPid = getpid();\n\n\n commonMemory = shmget(key, sizeof(sharedData), IPC_CREAT | 0622);\n cMemPtr = malloc(sizeof(struct data));\n cMemPtr = shmat(commonMemory, NULL, 0);\n queue = msgget(key, IPC_CREAT | 0622);\n semSet = semget(key, 1, IPC_CREAT | 0622);\n\n struct sembuf* sops = calloc(1, sizeof(struct sembuf));\n sops[0].sem_flg = 0;\n sops[0].sem_num = 0;\n sops[0].sem_op = 1;\n semop(semSet, sops, 1);\n\n *cMemPtr = sharedData;\n\n signal(SIGINT, signalHandler); //TODO switch to SIGTERM\n\n barberLogic();\n}\n\n" } ]
51
mibrahimfm/euler-project
https://github.com/mibrahimfm/euler-project
b40170cac203893979a7598482c18300dd98d43a
c11111027c760c34ab9f3710042752a71c40df7a
be907b33a11821aa271cd7db89b7b106a1962fd9
refs/heads/master
2020-06-29T12:15:15.491197
2019-08-04T19:20:59
2019-08-04T19:20:59
200,532,489
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5031347870826721, "alphanum_fraction": 0.5219435691833496, "avg_line_length": 14.560976028442383, "blob_id": "d4829b46f0f55dc83e44347d0b9c4ccc267a7509", "content_id": "80a5ee463574ea5910ab0d0679d7b902da465fb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 40, "num_lines": 41, "path": "/python/main.py", "repo_name": "mibrahimfm/euler-project", "src_encoding": "UTF-8", "text": "#coding: utf-8\n\n#author: Matheus Ibrahim\n\nimport math\n\ndef gcd(m, n):\n while(m!= 0):\n c = m\n m = n % m\n n = c\n return n\n\ndef lcm(x, y):\n return x * (y / gcd(x,y))\n\ndef P(limit, streak):\n multiple = streak\n for i in range(2, streak):\n multiple = lcm(multiple, i)\n \n limit-=2\n\n minimum = limit // multiple\n multiple = lcm(multiple, streak + 1)\n maximum = limit // multiple\n \n return minimum - maximum\n\n\n\ndef findResult():\n limit = 31\n result = 0\n pow = 4\n for i in range(1, limit+1):\n result += P(pow, i)\n pow *= 4\n print(math.floor(result))\n\nfindResult()\n" } ]
1
bulatok/telebottest
https://github.com/bulatok/telebottest
d818875dfd612c5da32d2b272c104f5e54ab8464
445e1de457527fdf91ebdb94fafebfb278ffc0b1
79a48a71c170d91e99be6e00f5eacb855cf4bc99
refs/heads/main
2023-09-04T15:47:05.729463
2021-07-09T21:01:10
2021-07-09T21:01:10
425,220,083
0
0
null
2021-11-06T10:54:54
2021-07-09T21:01:13
2021-07-09T21:01:10
null
[ { "alpha_fraction": 0.597835123538971, "alphanum_fraction": 0.6128226518630981, "avg_line_length": 26.930233001708984, "blob_id": "58cbf8efe1fe1c7045fb44f910d212b8d5f1db32", "content_id": "b8d097e704ec6692cd885473d8fd053a40f8be3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1354, "license_type": "no_license", "max_line_length": 127, "num_lines": 43, "path": "/main.py", "repo_name": "bulatok/telebottest", "src_encoding": "UTF-8", "text": "import telebot\nimport socket\nimport threading\na = []\ndef scan_port(ip,port):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(0.5)\n try:\n connect = sock.connect((ip,port))\n a.append(port)\n sock.close()\n except:\n pass\n\nbot = telebot.TeleBot(\"TOKEN\", parse_mode=None)\nports = [i for i in range(1,1001)]\[email protected]_handler(commands=['start', 'help'])\ndef send_welcome(message):\n bot.reply_to(message, \"Привет, этот бот создан для поиска открытых портов\\nДля поиска используй команду /ip xxx.xxx.xx.xx\")\n\[email protected]_handler(commands=['ip'])\ndef mode(message):\n ip = '178.214.255.29'\n try:\n ip = (message.text.split())[1]\n except:\n bot.reply_to(message, 'Возникла ошибка, попробуй еще раз!')\n pass\n for element in ports:\n t = threading.Thread(target=scan_port, kwargs={'ip':ip,'port': element})\n\n t.start()\n\n try:\n if len(a)!=0:\n bot.reply_to(message,'Открытые порты: '+str(a))\n a.clear()\n else:\n bot.reply_to(message, 'Нет открытых портов')\n except:\n bot.reply_to(message, 'Возникла ошибка, попробуй еще раз!')\n\nbot.polling()\n" } ]
1
subreena10/moreexercise
https://github.com/subreena10/moreexercise
66ac5d6b19a9775dea147509f30060bff934be8f
7668fcf00672e31e53b3815934158369b3bd7514
3861ff17bfd3ae8f8e3d24dcfbeed2c531bd06b0
refs/heads/main
2023-09-03T20:58:33.026979
2021-10-09T05:13:53
2021-10-09T05:13:53
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6454545259475708, "alphanum_fraction": 0.6454545259475708, "avg_line_length": 19.090909957885742, "blob_id": "1b9368fa98732a1453f98658d119c466c01cd90f", "content_id": "abbd085d1834b35c8b1b5e483e678b2a315225a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 220, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/question3.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# string_name=\"subreenabano\"\n# print(len(string_name))\n\nstring_name=\"navgurukul\"\n# if \"n\" in string_name:\n# print(\"n hai\")\n# else:\n# print(\"n nahie hai\")\n\nprint(\"n\" in string_name)\nprint(type(\"n\" in string_name))" }, { "alpha_fraction": 0.6095238327980042, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 14.142857551574707, "blob_id": "54dc18578bc86af4254ddb31c0db6d140a575043", "content_id": "2a6e031d7549cf62ea52893b74da46b37b87c03e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/factorial.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "num=int(input(\"enter the factorial num: \"))\ni=1\nfact=1\nwhile i<=num:\n fact=fact*i\n i+=1\nprint(fact)" }, { "alpha_fraction": 0.5869120359420776, "alphanum_fraction": 0.5910020470619202, "avg_line_length": 18.559999465942383, "blob_id": "ffa885a8f3a62130110c162ecc7402452eeeb55b", "content_id": "e2bb3f4905fcf7d9324d44de95e74e78e7e5591f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 489, "license_type": "no_license", "max_line_length": 120, "num_lines": 25, "path": "/splitstring.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# sentence = \"NavGurukul is an alternative to higher education reducing the barriers of current formal education system\"\n# new_list=[]\n# c=\" \"\n# for i in sentence:\n# if i==\" \":\n# new_list.append(c)\n# c=\" \"\n# else:\n# c+=i\n# if c:\n# new_list.append(c)\n# print(new_list)\n\n\n\nname=\"my name is subreena\"\nstr=name.split(\" \")\nprint(str)\n\n\n# words = \"navgurukul is great\"\n# counter = 0\n# while counter < len(words):\n# print (words[counter])\n# counter+=1\n" }, { "alpha_fraction": 0.25, "alphanum_fraction": 0.37288135290145874, "avg_line_length": 10.800000190734863, "blob_id": "0536c0ed033de53b134482fdf529dabe21bf851d", "content_id": "7a4bc9c5047ca764f02bd88ed7411f8478b40a72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 236, "license_type": "no_license", "max_line_length": 18, "num_lines": 20, "path": "/string.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# i=891\n# while i<931:\n# z=i-890\n# print(z)\n\n\n# i=891\n# while i<931:\n# # if i%2==0:\n# z=i-890\n# # if z%3==0:\n# print(z)\n# i+=1 \n\n# i=10\n# while i>=1:\n# # z=i-39\n# # print(z)\n# print(i)\n# i-=1\n" }, { "alpha_fraction": 0.4099888205528259, "alphanum_fraction": 0.4442788064479828, "avg_line_length": 16.303226470947266, "blob_id": "dc33a6f08832b755a4396cbf2f98898ca02c6ff4", "content_id": "983f49705359f45b8fbe6f3ba00583bfb9ef247e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2683, "license_type": "no_license", "max_line_length": 89, "num_lines": 155, "path": "/dictlogical.py", "repo_name": 
"subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# a={'a':[\"saloni\"],'b':[\"subreena\"]}\n# for i in a:\n# a[i] = a[i][0]\n# print(a)\n\n\n# test_list = [{\"id\" : 1, \"data\" : \"HappY\"},\n# {\"id\" : 2, \"data\" : \"BirthDaY\"},\n# {\"id\" : 3, \"data\" : \"Rash\"}]\n# for i in range(len(test_list)):\n# if test_list[i]['id'] == 2:\n# del test_list[i]\n# break\n# print (test_list)\n\n\n# dic={'a':2,'b':9,'c':5}\n# sum=0\n# for i in dic.values():\n# sum=sum+i\n# print(sum)\n\n\n# b={'a':20,'b':50,'c':70}\n# c={}\n# for i in b:\n# f=float(b[i])\n# c.update({i:f})\n# print(c)\n\n\n# dic={1:{1:'one',2:'two'},2:{3:'three',4:'four'},3:{5:'five',6:'six'}}\n# sum=0\n# for i in dic:\n# for j in dic[i]:\n# sum=sum+j\n# print(sum)\n\n\n# a=[1,2,3,4,5]\n# b={}\n# for i in a[::-1]:\n# b={i:b}\n# print(b)\n\n# a={0:10,1:20}\n# a[2]=30\n# print(a)\n\n# a=int(input(\"Enter your number: \"))\n# b={}\n# i=1\n# while i<=a:\n# key=input(\"Enter your key: \")\n# value=int(input(\"Enter your value: \"))\n# b[key]=value\n# i+=1\n# print(b)\n\n# num=int(input(\"enter ur number: \")) # second number is 3.\n# b=num//100\n# c=b%10\n# if c==3:\n# print(\"yes\")\n# else:\n# print(\"no\")\n\n\n# user=input(\"enter ur number: \") #second last number is 3.\n# if \"3\" in (user) and user[2]==\"3\":\n# print(\"yes\")\n# else:\n # print(\"no\") \n\n# x=\"global\"\n# def my_fun():\n# # x=x*2\n# print(x)\n# my_fun()\n# print(x)\n\n\n# i=1\n# while i<=5:\n# print(end=\"\")\n# j=0\n# while j<=i:\n# print ( j*\"*\",end=\"\")\n# j+=1\n# print()\n# i+=1\n\n# if True:\n# if False:\n# if True:\n# print(\"A\")\n# if True:\n# if True:\n# if True:\n# print(\"b\")\n# if True:\n# if False:\n# print(\"c\")\n# elif True:\n# if True:\n# print(\"D\")\n\n# a, b = 10, 20 # ternary operator example.\n# if a != b:\n# if a > b:\n# print(\"a is greater than b\") # condition ? 
value_if_true : value_if_false\n# else:\n# print(\"b is greater than a\")\n# else:\n# print(\"Both a and b are equal\")\n\n# a={1:23,2:4,5:7}\n# print(sorted(a.values()))\n\n# for i,j in sorted(a.items()):\n# print(i,a[i],j)\n\n\n# sorted_a=dict(sorted(a.items()))\n# print(sorted_a)\n\n# dt = {5:4, 1:6, 6:3}\n\n# sorted_dt = {key: value for key, value in sorted(dt.items(), key=lambda item: item[1])}\n\n# print(sorted_dt)\n\n# a=[\"nikita\"]\n# i=0\n# c={}\n# while i<len(a): \n# j=0\n# e={}\n# while j<len(a[i]):\n# b=a[i][j]\n# a[i]=b\n # e.update({j:b})\n# j+=1\n# i+=1\n# print(e)\n\n# def my_max(a):\n# i=0\n# max=a[i]\n# for i in a:\n# if i>max:\n# max=i\n# return max\n# d=my_max([82,9,52,8,7,6,50])\n# print(d)\n\n" }, { "alpha_fraction": 0.3729729652404785, "alphanum_fraction": 0.44324323534965515, "avg_line_length": 15.909090995788574, "blob_id": "c2709a8519547464cd2477c35b5c39c910ed5af4", "content_id": "021351d5ddc89b68861a55ad63035c5dd92e1abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/1to 1000numberstoprint.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "i=1\nwhile i<=1000:\n if i %3==0:\n print(\"nav\",i)\n elif i%7==0:\n print(\"gurukul\",i)\n elif i%21==0:\n print(\"navgurukul\",i)\n else:\n print(i)\n i+=1" }, { "alpha_fraction": 0.5841270089149475, "alphanum_fraction": 0.6253968477249146, "avg_line_length": 25.16666603088379, "blob_id": "7fe2d73d5ed4d34e9db4d3ed9cf4227b5a39d4fa", "content_id": "5613f52de5471b0b5d90258b84326a5ca42379c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 315, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/3input.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "var1=int(input(\"enter first num: \"))\nvar2=int(input(\"enter ur second num:\"))\nvar3=int(input(\"enter ur third num: \"))\nif var1>var2:\n if var1>var3:\n print(var1,\"is bigest num\")\n else:\n print(var3,\"is bigest num\")\nelif var2>var3:\n print(var2,\"is bigest num\")\nelse:\n print(var3,\"is bigest\")\n\n" }, { "alpha_fraction": 0.3791208863258362, "alphanum_fraction": 0.5, "avg_line_length": 17.299999237060547, "blob_id": "ab93d581d56c01c1269579bd06e32af4768d3a7d", "content_id": "8f18323beb89b2d08bdf62d3f927f09a9772c36a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 30, "num_lines": 10, "path": "/quetsion8.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "list1 = [1, 5, 10, 12, 16, 20]\nlist2 = [1, 2, 10, 13, 16]\na=[]\nfor i in list1:\n if i not in a:\n a.append(i)\nfor j in list2:\n if j not in a:\n a.append(j)\nprint(a)" }, { "alpha_fraction": 0.7376425862312317, "alphanum_fraction": 0.7566539645195007, "avg_line_length": 36.71428680419922, "blob_id": "234363d5de0b63d6718aa06b1a42e8b6bd2d2561", "content_id": "7a78a80a2fbdf7cb6615be631fc4cbd1c7b06ba8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/amountofngstudent.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "numberofstudent=int(input(\"Enter the number no.of students: \"))\nstudentexpenses=int(input(\"Enter the expenses 
of student: \"))\ntotalexpense=numberofstudent * studentexpenses \nif studentexpenses<=50000:\n print(\"we are in budget\")\nelse:\n print(\"out of budget\")" }, { "alpha_fraction": 0.5257142782211304, "alphanum_fraction": 0.5571428537368774, "avg_line_length": 18.38888931274414, "blob_id": "79a6856f1b63dbe94c698285faa47d67e758a667", "content_id": "f7697c3be49c8b823a711cf4ebf812c22ada4673", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 350, "license_type": "no_license", "max_line_length": 42, "num_lines": 18, "path": "/harshadnumber.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# number=int(input(\"Enter your number: \"))\n# num=number\n# rem=0\n# sum=0\n# while number>0:\n# rem=number%10\n# sum=sum+rem\n# number=number//10\n# if num%sum==0:\n# print(\"it is harshad number\")\n# else:\n# print(\"no\")\n\n# num=int(input(\"enter your number: \"))\n# if \"3\" in num and num[1]==3 :\n# print(\"yes\")\n# else:\n# print(\"no\")\n\n" }, { "alpha_fraction": 0.5869565010070801, "alphanum_fraction": 0.5869565010070801, "avg_line_length": 23.04347801208496, "blob_id": "33a9c9bf751bfeb7ad190c40a1d900ab1a2a192b", "content_id": "ed12e1eab7ff580a737ad33166e54f4ce8311222", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 94, "num_lines": 23, "path": "/question6.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# string_list = [\"Rishabh\", \"Rishabh\", \"Abhishek\", \"Rishabh\", \"Divyashish\", \"Divyashish\"]\n# a=[]\n# for i in string_list:\n# if i not in a:\n# a.append(i)\n# print(a)\n\n\n\n# string_list = [\"Rishabh\", \"Rishabh\", \"Abhishek\", \"Rishabh\", \"Divyashish\", \"Divyashish\"]\n# new_list=[]\n# for i in string_list:\n# if i not in new_list:\n# new_list.append(i)\n# print(new_list)\n\n\nstring_list = [\"Empathy\", \"Empathy\", \"Kindness\", \"Kindness\", \"Compassion\", \"Humble\", \"Humble\"]\nb=[]\nfor i in string_list:\n if i not in b:\n b.append(i)\nprint(b)" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.4934210479259491, "avg_line_length": 18.125, "blob_id": "d24b4bfb6324f1ab3f0d1717df73f3b495892760", "content_id": "7618ec3902739815d24a2be1a88bae9f3358d131", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 35, "num_lines": 8, "path": "/toduplicatelist.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "list1 = [1, 342, 75, 23, 98]\nlist2 = [75, 23, 98, 12, 78, 10, 1]\na=[]\nfor i in list1:\n if i in list2:\n # print(i)\n a.append(i)\nprint(a)" }, { "alpha_fraction": 0.5141700506210327, "alphanum_fraction": 0.5242915153503418, "avg_line_length": 32, "blob_id": "c6e105da4bd31ce011c36126d083ac3dfc65702e", "content_id": "a8f6c79781d3eeb8c16137b9421f4fd4e273dc1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "no_license", "max_line_length": 56, "num_lines": 15, "path": "/strongpassword.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "strong_password=input(\"Enter ur password:\")\n\nif len(strong_password) is \"6\" or \"16\":\n if \"A\" or \"Z\" in strong_password :\n if \"$\" in strong_password :\n if \"2\" or \"8\" in strong_password :\n print(\"your password is strong.\")\n else:\n 
print(\"your password is not strong\")\n else:\n print(\"medium password.\")\n else:\n print(\"create a strong password.\")\nelse:\n print(\"passwrd is not strong. \")" }, { "alpha_fraction": 0.357251912355423, "alphanum_fraction": 0.47633588314056396, "avg_line_length": 27.521739959716797, "blob_id": "188b7c4a6ab6641649f97a173a4a69f23da61053", "content_id": "be18b8f411239ea1ab6f779c7770423f0aa28845", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 655, "license_type": "no_license", "max_line_length": 98, "num_lines": 23, "path": "/question10.py", "repo_name": "subreena10/moreexercise", "src_encoding": "UTF-8", "text": "# big_list = [[1,2,3], [5,8,9], [4,3,77,521,31,311]]\n# counter1 = 0\n# while counter1 < len(big_list):\n# small_list_length = len(big_list[counter1])\n# counter2 = 0\n# while counter2 < small_list_length:\n# print (big_list[counter1][counter2])\n# counter2 = counter2 + 1\n# counter1 = counter1 + 1\n# print ('-----')\n\n\n\nmarks = [[45, 21, 42, 63], [12, 42, 42, 53], [42, 90, 78, 13], [94, 89, 78, 76], [87, 55, 98, 99]]\nsum=0\nfor i in marks:\n for j in range(len(i)):\n for k in range(len((i)-j-1)):\n if i[k]<i[k+1]:\n a=i[k]\n i[k]=i[k+1]\n i[k+1]=a\n print(i[k])" } ]
14
aziliak/XML2CSV
https://github.com/aziliak/XML2CSV
f9472c11a72248a4a941e7dbd936198c10e31c2f
aa6e83edb07b77ecd74a1ca31a634f3de2380073
b7326de98da5a22d2198507e69c420f0712621ed
refs/heads/master
2021-01-17T22:37:09.870610
2016-06-09T17:14:33
2016-06-09T17:14:33
60,854,897
0
0
null
2016-06-10T14:43:48
2016-06-10T14:43:49
2016-06-09T17:14:33
Python
[ { "alpha_fraction": 0.6704900860786438, "alphanum_fraction": 0.6767466068267822, "avg_line_length": 29.935483932495117, "blob_id": "5295639f5b7b2156998443cf6c0c669dec5a20d1", "content_id": "a0102cbb65eef9bccfdab8b2cf96c6636bbb4a32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/XML Parse.py", "repo_name": "aziliak/XML2CSV", "src_encoding": "UTF-8", "text": "from bs4 import BeautifulSoup\nfrom collections import defaultdict\nimport pandas as pd\n\n\nxml_file = \"DATA_DICTIONARY.xml\"\nsoup = BeautifulSoup(open(xml_file), \"xml\")\nrows = soup.find_all('ROW')\n\n'''For some reason this xml file had a line break after each\nchild <COLUMN></COLUMN> pair and we filter these out.\n(function __ne__ is called by !=)'''\ncols = list(filter('\\n'.__ne__, rows[0].contents))\n\nindex = [r for r in range(len(rows))]\ncolumns = [c['name'] for c in cols]\nprint(columns)\n\n'''Create a DataFrame \"From dict of Series\" -\nhttp://pandas.pydata.org/pandas-docs/stable/dsintro.html'''\n\nd = defaultdict(list)\nfor row in rows:\n for c in (list(filter('\\n'.__ne__, row.contents))):\n values = d[c['name']]\n values.append(c.string)\nd2 = {}\nfor header in columns:\n d2[header] = pd.Series(d[header], index=index)\ndf = pd.DataFrame(d2, index=index, columns=columns)\ndf.to_csv(\"DATA_DICTIONARY.csv\", sep=',', encoding='utf-8', index=False)\n" } ]
1
HabeshaQueen/PITCH
https://github.com/HabeshaQueen/PITCH
7420d6ecf9f39bdcc68a5778acd3bca0a0260a30
e0e02adf32966bcc622aad4b732e7a7d2db041e5
198d056e535a4132dbb41abf4dc3d849b1ca45f1
refs/heads/master
2020-05-15T12:59:13.929905
2019-04-25T14:17:56
2019-04-25T14:17:56
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7795918583869934, "alphanum_fraction": 0.7795918583869934, "avg_line_length": 26.25, "blob_id": "a7a9cd8da9d7df73878aa0b60b77ac4a14bd2811", "content_id": "735c0a6e83a78c2f4a6489a5c360bb44a59a2dd1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 980, "license_type": "permissive", "max_line_length": 67, "num_lines": 36, "path": "/app/__init__.py", "repo_name": "HabeshaQueen/PITCH", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_bootstrap import Bootstrap\nfrom config import config_options\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES\nfrom flask_mail import Mail\nfrom flask_moment import Moment\n\nbootstrap = Bootstrap()\ndb = SQLAlchemy()\nlogin_manager = LoginManager()\nphotos = UploadSet('photos',IMAGES)\nmail=Mail()\nmoment = Moment()\n\ndef create_app(config_name):\n\tapp = Flask(__name__)\n\n\tapp.config.from_object(config_options[config_name])\n\tbootstrap.init_app(app)\n\tdb.init_app(app)\n\tlogin_manager.init_app(app)\n\tmoment.init_app(app)\n\tlogin_manager.session_protection = 'strong'\n\tlogin_manager.login_view = 'auth.login'\n\tconfigure_uploads(app,photos)\n\tmail.init_app(app)\n\n\tfrom .main import main as main_blueprint\n\tapp.register_blueprint(main_blueprint)\n\n\tfrom .auth import auth as auth_blueprint\n\tapp.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n\n\treturn app" }, { "alpha_fraction": 0.7534246444702148, "alphanum_fraction": 0.7534246444702148, "avg_line_length": 36.88888931274414, "blob_id": "1917b7a62af0767b522754e9d2f9d8ee888d1892", "content_id": "8bfe7c23b77ee672af31170312f6e4629c820e19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1022, "license_type": "permissive", "max_line_length": 272, "num_lines": 27, "path": "/app/main/forms.py", "repo_name": "HabeshaQueen/PITCH", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField,SelectField\nfrom wtforms.validators import Required, Email, EqualTo\nfrom ..models import User\nfrom wtforms import ValidationError\n\n\nclass UpdateProfile(FlaskForm):\n bio = TextAreaField('ABOUT YOU', validators=[Required()])\n submit = SubmitField('SUBMIT')\n\n\nclass PitchForm(FlaskForm):\n title=StringField(\"Welcome to Pitch Ideas\")\n Pitch_category = SelectField('Pitch Category',choices=[('Technology-Pitch','Technology Pitch'),('Business-Pitch','Business Pitch'),('Interview-Pitch','Interview Pitch'),('Pickup-Line','Pickup-Line Pitch'),('Promotion-Pitch','Promotion Pitch')],validators=[Required()])\n content = TextAreaField('YOUR PITCH')\n submit = SubmitField('SUBMIT')\n\n\nclass CommentForm(FlaskForm):\n comment_id = TextAreaField('WRITE COMMENT')\n submit = SubmitField('SUBMIT')\n\n\nclass CategoriesForm(FlaskForm):\n\tname = TextAreaField('PITCH')\n\tsubmit = SubmitField('SUBMIT')" }, { "alpha_fraction": 0.7129380106925964, "alphanum_fraction": 0.7304581999778748, "avg_line_length": 22.1875, "blob_id": "13dbaac9bc137721e15a716c37661517c82454f2", "content_id": "2cf6ee0f09020a87a7d6f84f578cd34f2a5d80b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "permissive", "max_line_length": 86, "num_lines": 32, "path": "/config.py", "repo_name": 
"HabeshaQueen/PITCH", "src_encoding": "UTF-8", "text": "import os\n\nclass Config:\n\tSECRET_KEY = os.environ.get(\"SECRET_KEY\")\n\tUPLOADED_PHOTOS_DEST = 'app/static/photos'\n\tMAIL_SERVER = 'smtp.googlemail.com'\n\tMAIL_PORT = 587\n\tMAIL_USE_TLS = True\n\tMAIL_USERNAME = os.environ.get(\"MAIL_USERNAME\")\n\tMAIL_PASSWORD = os.environ.get(\"MAIL_PASSWORD\")\n\tSUBJECT_PREFIX = 'PITCH'\n\tSENDER_EMAIL = '[email protected]'\nclass TestConfig(Config):\n\tpass\n # SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://adho:1234@localhost/pitch_test'\n\nclass ProdConfig(Config):\n\tpass\n\tSQLALCHEMY_DATABASE_URI = os.environ.get(\"DATABASE_URL\")\n\n\nclass DevConfig(Config):\n\tSQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://adho:1234@localhost/pitch'\n\n\tDEBUG = True\n\n\n\nconfig_options = {\n\t'development': DevConfig,\n\t'production':ProdConfig\n}\t" }, { "alpha_fraction": 0.680232584476471, "alphanum_fraction": 0.8255813717842102, "avg_line_length": 27.83333396911621, "blob_id": "ff1f47b068000d7c1a274b32f1e4bcab5ad47bc7", "content_id": "5c63403fdf6fd648c57dbca0cd865edad01704d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 172, "license_type": "permissive", "max_line_length": 55, "num_lines": 6, "path": "/start.sh", "repo_name": "HabeshaQueen/PITCH", "src_encoding": "UTF-8", "text": "export PITCH_API_KEY=\"847d5d2867728336624023baf4ee5f4a\"\nexport SECRET_KEY=\"chukula\"\nexport [email protected]\nexport MAIL_PASSWORD=chukula13\n\npython3.6 manage.py server" } ]
4
dremdem/test_field
https://github.com/dremdem/test_field
849eca3f56810689f4136b721de9c5d8619bc608
ac5df739b80f05783d6a15f27aa54e7a81a70ea2
128c56bd2d5110aef846515ff11a3bb75bbdad8b
refs/heads/master
2021-05-14T01:48:58.759766
2018-01-07T15:49:43
2018-01-07T15:49:43
116,576,756
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5668933987617493, "avg_line_length": 20.047618865966797, "blob_id": "96b475cc25e15eb41c582408b55a246ecfb96ace", "content_id": "5a25db66ba55028c558b454f2d8e98673b19f0f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 516, "license_type": "no_license", "max_line_length": 63, "num_lines": 21, "path": "/templates/field/index.html", "repo_name": "dremdem/test_field", "src_encoding": "UTF-8", "text": "<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n <title>Test field</title>\n</head>\n<body>\n\n<h5>Divisions:</h5>\n{% for d in division %}\n <h6>Подразделение {{ d.name }}</h6>\n <ul>\n <li>Номер: {{ d.no }}</li>\n <li>Идентификатор: {{ d.id }}</li>\n <li>Количество департаментов: {{ d.departments.count}}</li>\n <li>Количество сотрудников: {{ d.division_amount }} </li>\n </ul>\n{% endfor %}\n\n</body>\n</html>" }, { "alpha_fraction": 0.6960148215293884, "alphanum_fraction": 0.7089897990226746, "avg_line_length": 31.515151977539062, "blob_id": "d6da60f7a584990767543575b7e7c79eef363bfc", "content_id": "8a67e9b687f17f5e821ad6247783d630d17744b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 98, "num_lines": 33, "path": "/field/models.py", "repo_name": "dremdem/test_field", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.db.models.expressions import RawSQL\n\n# Create your models here.\n\n\nclass DManagerWithCount(models.Manager):\n def get_queryset(self):\n return super().get_queryset().annotate(division_amount=RawSQL(\"\"\"\n select count(*) from field_department fd, field_employee fe\n where fd.id = fe.department_id and fd.division_id = field_division.id\n \"\"\", []))\n\n\nclass Division(models.Model):\n name = models.CharField(max_length=200)\n no = models.CharField(max_length=5)\n\n object = models.Manager()\n obj_with_count = DManagerWithCount()\n\n\nclass Department(models.Model):\n name = models.CharField(max_length=200)\n no = models.CharField(max_length=5)\n division = models.ForeignKey(Division, on_delete=models.CASCADE, related_name='departments')\n\n\nclass Employee(models.Model):\n name = models.CharField(max_length=200)\n last_name = models.CharField(max_length=200)\n age = models.IntegerField()\n department = models.ForeignKey(Department, on_delete=models.CASCADE, related_name='employees')\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.707317054271698, "alphanum_fraction": 0.707317054271698, "avg_line_length": 23.399999618530273, "blob_id": "c9f971aac7c10db81cd2908348516c3a8ab99a0d", "content_id": "b9ef143ee8d5631356aa90d33a765af8cc063d78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 62, "num_lines": 10, "path": "/field/views.py", "repo_name": "dremdem/test_field", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import Division\n\n# Create your views here.\n\n\ndef index(request):\n d = Division.obj_with_count.all()\n conext = {'division': d}\n return render(request, 'field/index.html', context=conext)\n\n\n" }, { "alpha_fraction": 0.7469135522842407, "alphanum_fraction": 0.7592592835426331, "avg_line_length": 15.199999809265137, "blob_id": "2d23bfb1f5c7124933a7ea62686b57751806abb0", "content_id": "f6485a13a126f0eb1951cfcb72f88a6faa859b66", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 162, "license_type": "no_license", "max_line_length": 55, "num_lines": 10, "path": "/README.md", "repo_name": "dremdem/test_field", "src_encoding": "UTF-8", "text": "# test_field\nExample: How to add calcucalted field to a Django model\n\nLinks to youtube:\n\nPart1:\nhttps://youtu.be/pF_UKvcAEtk\n\nPart2:\nhttps://youtu.be/wOMSNsbjgOc\n" } ]
4
wadegilmer/blogz
https://github.com/wadegilmer/blogz
2d68bf29b080032128a8237f8a96db9f1d477618
4cb3c923d4d91257460fc9a2366ae3b5d3ac06e7
5134f3413fe737807211ce744345237418f955af
refs/heads/master
2020-05-24T11:34:55.104109
2019-05-20T21:52:03
2019-05-20T21:52:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6156599521636963, "alphanum_fraction": 0.6192393898963928, "avg_line_length": 31.39130401611328, "blob_id": "5511162e2418101c370c89bf97e622a23451d1a2", "content_id": "52a1f6c1a7331f17dea0041f13b9f838800a8c96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4470, "license_type": "no_license", "max_line_length": 131, "num_lines": 138, "path": "/main.py", "repo_name": "wadegilmer/blogz", "src_encoding": "UTF-8", "text": "from flask import Flask, request, redirect, render_template, flash, session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://blogz:hello@localhost:8889/blogz'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\n\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RU'\n\nclass Blog(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.String(500))\n owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n\n def __init__(self, title, body, owner):\n self.title = title\n self.body = body\n self.owner = owner\n\nclass User(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20), unique=True)\n password = db.Column(db.String(20))\n blogs = db.relationship('Blog', backref='owner')\n\n def __init__(self, username, password):\n self.username = username\n self.password = password\n\ndef logged_in_user():\n return User.query.filter_by(username=session['username']).first()\n\ndef get_user_blogs(user_id):\n return Blog.query.filter_by(owner_id=user_id).all()\n\[email protected]_request\ndef require_login():\n allowed_routes = ['login', 'signup', 'index', 'blog']\n if request.endpoint not in allowed_routes and 'username' not in session:\n return redirect('/login')\n\[email protected]('/logout', methods=['POST'])\ndef logout():\n del session['username']\n return redirect('/login')\n\[email protected]('/login', methods=['POST', 'GET'])\ndef login():\n if request.method == \"POST\":\n username = request.form['username']\n password = request.form['password']\n user = User.query.filter_by(username=username).first()\n if user and user.password == password:\n session['username'] = username\n flash('Welcome back ' + username)\n return redirect('/new-post')\n else:\n flash('User Password incorrect, or user does not exist', 'error')\n\n return render_template('login.html')\n\[email protected]('/signup', methods=['POST', 'GET'])\ndef signup():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n verify = request.form['verify']\n existing_user = User.query.filter_by(username=username).first()\n if not existing_user and password == verify and username != '':\n new_user = User(username, password)\n db.session.add(new_user)\n db.session.commit()\n session['username'] = username\n return redirect('/new-post')\n else:\n return \"<h1>Username is already in use.</h1>\"\n\n return render_template('signup.html')\n\[email protected]('/new-post', methods=['POST', 'GET'])\ndef new_post():\n\n if request.method == 'GET':\n return render_template(\"new-post.html\")\n elif request.method == 'POST':\n blog_title = request.form['title']\n blog_body = request.form['body']\n blog_owner = User.query.filter_by(username=session['username']).first()\n if_error = False\n\n if blog_title == '':\n flash('Please fill in the title')\n if_error = 
True\n if blog_body == '':\n flash('Please fill in the blog entry')\n if_error = True\n if if_error:\n return render_template(\"new-post.html\")\n\n new_post = Blog(blog_title, blog_body, blog_owner)\n db.session.add(new_post)\n db.session.commit()\n id = str(new_post.id)\n\n return redirect(\"/?id=\" + id)\n\[email protected]('/blog')\ndef blog():\n encoded_id = request.args.get(\"id\")\n encoded_user = request.args.get(\"user\")\n all_blogs = Blog.query.all()\n blog = Blog.query.filter_by(id=encoded_id).first()\n\n if encoded_user == '':\n flash('You must login to view your blog')\n return redirect('/blog')\n\n if encoded_user:\n user = User.query.filter_by(username=encoded_user).first()\n user_blogs = Blog.query.filter_by(owner_id=user.id).all()\n return render_template('singleUser.html', all_blogs=all_blogs, user_blogs=user_blogs, blog=blog, encoded_user=encoded_user)\n\n return render_template('singleUser.html', all_blogs=all_blogs, blog=blog, encoded_id=encoded_id)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html', users=User.query.all())\n\n\nif __name__ == '__main__':\n app.run()\n" } ]
1
Schlagoo/sort_algorithms
https://github.com/Schlagoo/sort_algorithms
c28d064888957e4ea3cbbadcd367850690fb81c5
59e2c383f0fc4ad4c9c0982cf99dc1880bae0764
5f111b35cd9fc0b7417e2ff44d8aa1d59203317f
refs/heads/master
2022-04-20T05:10:09.313028
2020-04-20T17:32:55
2020-04-20T17:32:55
257,353,824
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6148812174797058, "alphanum_fraction": 0.6289384365081787, "avg_line_length": 23.414201736450195, "blob_id": "e58938306eb2415072bbe2d37dbffb627f737f04", "content_id": "68248112192e7ecab06deb3f4c3404c452b3e4fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4126, "license_type": "no_license", "max_line_length": 100, "num_lines": 169, "path": "/algorithms.py", "repo_name": "Schlagoo/sort_algorithms", "src_encoding": "UTF-8", "text": "\"\"\" Implementation of different sorting algorithms in Python3 including: \n\tInsertionSort, SelectionSort, BubbleSort, MergeSort, QuickSort.\n\n\tAuthor:\t\thttps://github.com/Schlagoo\n\tDate:\t\t2020/04/20 \t\t\n\tPython: \t3.6.9\n\"\"\"\n\n\nclass SortingAlgorithms:\n\n\tdef __init__(self, arr: list):\n\t\tself.arr = arr\n\n\tdef insertion_sort(self) -> list:\n \n\t\t\"\"\" Insertion sort list by iterting through list and checking if previous element is bigger.\n\n\t\t:return\t\tself.arr\tSorted list\n\t\t\"\"\"\n\n\t\tfor i in range(1, len(self.arr)):\n\t\t\t\n\t\t\tj = i\n\t\t\tmarker = self.arr[i]\n\n\t\t\twhile (j > 0 and self.arr[j - 1] > marker):\n\n\t\t\t\tself.arr[j] = self.arr[j - 1]\n\t\t\t\tj -= 1\n\t\t\t\t\n\t\t\tself.arr[j] = marker\n\n\t\treturn self.arr\n\n\tdef selection_sort(self) -> list:\n \n\t\t\"\"\" Selection sort list by searching for max element and put in at the end of list.\n\t\t\n\t\t:return\t\tself.arr\tSorted list\n\t\t\"\"\"\n\n\t\tn = len(self.arr) - 1\n\n\t\twhile n >= 0:\n\n\t\t\tmax = 0\n\n\t\t\tfor i in range(1, n + 1):\n\t\t\t\tif self.arr[i] > self.arr[max]:\n\t\t\t\t\tmax = i\n\n\t\t\tself.arr[n], self.arr[max] = self.arr[max], self.arr[n]\n\t\t\tn -= 1\n\n\t\treturn self.arr\n\n\tdef bubble_sort(self) -> list:\n\n\t\t\"\"\" Bubble sort algorithm to sort list by iterating through list and comparing values of i and i+1\n\n\t\t:return\t\tself.arr\tSorted list\n\t\t\"\"\"\n\n\t\tfor _ in range(len(self.arr)):\n\t\t\tfor i in range(len(self.arr) - 1):\n\t\t\t\tif self.arr[i] > self.arr[i + 1]:\n\t\t\t\t\tself.arr[i], self.arr[i + 1] = self.arr[i + 1], self.arr[i]\n\t\t\n\t\treturn self.arr\n\n\tdef merge_sort(self, arr: list) -> list:\n\n\t\t\"\"\" Merge sort algorithm to sort list elements by size.\n\n\t\t:param\t\tarr\t\t\tList containing elements\n\t\t:return\t\tmerge()\t\tFunction to merge subsets \t\n\t\t\"\"\"\n\n\t\tif len(arr) < 2:\n\t\t\treturn arr\n\n\t\tleft_half, right_half = [], []\n\t\tm = len(arr) // 2\n\n\t\tleft_half = self.merge_sort(arr[:m])\n\t\tright_half = self.merge_sort(arr[m:])\n\n\t\treturn self.merge(left_half, right_half)\n\n\n\tdef merge(self, left_half: list, right_half: list) -> list:\n\n\t\t\"\"\" Merge left and right half of list by sorting elements.\n\n\t\t:param\t\tleft_half\tLeft subset of elements\n\t\t:param\t\tright_half\tRight subset of elements\n\t\t:return\t\tmerger \t\tResult of merging left and right half\n\t\t\"\"\"\n\n\t\tmerger = []\n\t\ti, j = 0, 0\n\n\t\twhile (len(merger) < len(left_half) + len(right_half)):\n\t\t\t\n\t\t\tif left_half[i] < right_half[j]:\n\t\t\t\tmerger.append(left_half[i])\n\t\t\t\ti+= 1\n\t\t\telse:\n\t\t\t\tmerger.append(right_half[j])\n\t\t\t\tj+= 1\n\n\t\t\tif i == len(left_half) or j == len(right_half):\n\t\t\t\tmerger.extend(left_half[i:] or right_half[j:])\n\n\t\treturn merger\n\n\tdef quick_sort(self, arr: list, lower: int, upper: int) -> list: \n\n\t\t\"\"\" Quick sort list by generating pivot element, partition and sort subsets.\n\t\t\n\t\t:param\t\tarr\t\t\tList 
containing elements\n\t\t:param\t\tlower\t\tLower bound of current subset\n\t\t:param\t\tupper\t\tUpper bound of current subset\n\t\t:return\t\tarr\t\t\tSorted list\n\t\t\"\"\"\n\n\t\tif upper > lower:\n\t\t\tpivot = (lower + upper) // 2\n\t\t\tnew_pivot = self.sort_partitions(arr, lower, upper, pivot)\n\t\t\tarr = self.quick_sort(arr, lower, new_pivot - 1)\n\t\t\tarr = self.quick_sort(arr, new_pivot + 1, upper)\n\n\t\treturn arr\n\n\n\tdef sort_partitions(self, arr: list, lower: int, upper: int, pivot: int) -> int:\n\n\t\t\"\"\" Sort partitions of elements.\n\t\t\n\t\t:param\t\tarr\t\t\tList containing elements\n\t\t:param\t\tlower\t\tLower bound of current subset\n\t\t:param\t\tupper\t\tUpper bound of current subset\n\t\t:param\t\tpivot\t\tCurrent pivot element\n\t\t:return\t\tnew_pivot\tNext pivot element\n\t\t\"\"\"\n\n\t\tnew_pivot = lower\n\t\tvalue_pivot = arr[pivot]\n\t\t\n\t\tarr[pivot], arr[upper] = arr[upper], arr[pivot]\n\t\t\n\t\tfor i in range(lower, upper):\n\t\t\tif arr[i] <= value_pivot:\n\t\t\t\tarr[new_pivot], arr[i] = arr[i], arr[new_pivot]\n\t\t\t\tnew_pivot += 1\n\t\t\n\t\tarr[new_pivot], arr[upper] = arr[upper], arr[new_pivot]\n\t\t\n\t\treturn new_pivot\n\n\nif __name__ == \"__main__\":\n\ta = SortingAlgorithms([5, 3, 1, 7, 4, 6])\n\t# print(\"Insertion sorted list: {}\".format(a.insertion_sort()))\n\t# print(\"Selection sorted list: {}\".format(a.selection_sort()))\n\t# print(\"Bubble sorted list: {}\".format(a.bubble_sort()))\n\t# print(\"Merge sorted list: {}\".format(a.merge_sort([5, 3, 1, 7, 4, 6])))\n\tprint(\"Quick sorted list: {}\".format(a.quick_sort([5, 3, 1, 7, 4, 6], lower=0, upper=5)))\n" }, { "alpha_fraction": 0.7415350079536438, "alphanum_fraction": 0.7483069896697998, "avg_line_length": 22.3157901763916, "blob_id": "1eb3a101c99495b22775f8bd12191f39a7c8ce5c", "content_id": "34ce21093e973ab1388369ad35fdb9ed762caa26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 886, "license_type": "no_license", "max_line_length": 102, "num_lines": 38, "path": "/README.md", "repo_name": "Schlagoo/sort_algorithms", "src_encoding": "UTF-8", "text": "# Sorting algorithms implemented in Python3\n\nClass containing following sorting algorithms implemented in Python3:\n\n* InsertionSort\n* SelectionSort\n* BubbleSort\n* MergeSort\n* QuickSort\n\n\n### Installing\n\nBefore running the script clone the repository to your desired directory:\n\n~~~\ncd /path/to/desired/directory\ngit clone [email protected]:Schlagoo/sort_algorithms.git\n~~~\n\nAfter uncommenting the desired function, you can run the script from the terminal (linux) via:\n\n(Make shure you the file is executable: `sudo chmod +x ./algorithms.py`!) \n\n~~~\npython3 ./algorithms.py\n~~~\n\nTo choose a algorithm, just uncomment the desired method of the SortingAlgorithms-class at the bottom.\nYou can change the input depending on the algorithm.\n\n## Built with\n\n* [Python 3.6.9](https://www.python.org/) - Programming language\n\n## Author\n\n* **Pascal Schlaak** - *Student* - [Schlagoo](https://github.com/Schlagoo)\n" } ]
2
michaeltriska/ctr_amaro
https://github.com/michaeltriska/ctr_amaro
7b33b55966e3c3543f13039fcbef51ccee59763f
11663ab563077402a30fe76e10a606abb8675172
9172103f9cd4fe5097af638feea78eb60e0825b0
refs/heads/master
2020-03-24T01:26:10.135837
2018-07-25T19:06:03
2018-07-25T19:06:03
142,336,682
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6841432452201843, "alphanum_fraction": 0.687979519367218, "avg_line_length": 22, "blob_id": "0dc89197c007313bd1089fa276d281db72b5b71c", "content_id": "bc394dde3828e10062331647d79fc42bbccdc9ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 782, "license_type": "no_license", "max_line_length": 76, "num_lines": 34, "path": "/service.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "from tornado.ioloop import IOLoop\nimport tornado.web\nfrom api.api import BusinessAnalysisHandler\n\n\"\"\"\nOverview\n\nThe applications use Tornado as a HTTP server,\nand Schematics for dealing with representations.\n\nProject structure\nThe typical application is structured in submodules:\n\n app\n api - api handler\n core - domain implementation, i.e. crud operatios on representations\n service.py - the service class\n helper - the configuration\n\"\"\"\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [(r\"/crt/v2\", BusinessAnalysisHandler)]\n tornado.web.Application.__init__(self, handlers)\n\ndef main():\n app = Application()\n app.listen(80)\n IOLoop.instance().start()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7682119011878967, "alphanum_fraction": 0.7682119011878967, "avg_line_length": 29.200000762939453, "blob_id": "4cda60f1d3c093ef83a84c4e138302b02ea8e12f", "content_id": "f0edc95764f8cd190a7f12cf050c50d76874a04c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 302, "license_type": "no_license", "max_line_length": 46, "num_lines": 10, "path": "/models/models.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "from schematics.models import Model\nfrom schematics.types import StringType\n\n\nclass AggregationRequest(Model):\n startTimestamp = StringType(required=True)\n endTimestamp = StringType(required=True)\n aggregation = StringType(required=True)\n product = StringType()\n platform = StringType()\n" }, { "alpha_fraction": 0.5609756112098694, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 12.666666984558105, "blob_id": "a4ce3447ace69390ffb00ddd2b82ec7ecb409a19", "content_id": "cb7b35cc56b2f24d99b83e7a734021b1117125c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 41, "license_type": "no_license", "max_line_length": 14, "num_lines": 3, "path": "/requirements.txt", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "tornado==5.0.2\npandas==0.23.0\nschematics\n" }, { "alpha_fraction": 0.6656000018119812, "alphanum_fraction": 0.671999990940094, "avg_line_length": 28.761905670166016, "blob_id": "851abc049644039356fae1817cb55f267a36ece4", "content_id": "98301d956f1390efde6a9dbf56fc85271180acb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 68, "num_lines": 21, "path": "/helper/helper.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "from datetime import datetime\nimport pytz\nimport time\n\ndef datetime_range(start, end, delta):\n current = start\n while current < end:\n yield current\n current += delta\n\ndef convert_tz(originalTimeStamp, originalTimeZone, targetTimeZone):\n \"\"\"\n Function converts unix-timestamp in s from\n originalTimeZone to targetTimeZone in ms\n \"\"\"\n newTimeStamp = pytz.timezone(\n 
originalTimeZone).localize(\n datetime.fromtimestamp(\n originalTimeStamp)).astimezone(\n pytz.timezone(targetTimeZone))\n return time.mktime(newTimeStamp.timetuple()) * 1000\n" }, { "alpha_fraction": 0.6258899569511414, "alphanum_fraction": 0.6485437154769897, "avg_line_length": 33.33333206176758, "blob_id": "d572259f67eea3bca7a78b1ed2e1953a350c149c", "content_id": "b8c7f7b081c54cc4531451840802772d1ba55745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1545, "license_type": "no_license", "max_line_length": 84, "num_lines": 45, "path": "/api/api.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "import tornado.web\nimport json\nfrom models.models import AggregationRequest\nfrom core.ctr import CTR_Calculater\n\nclass BusinessAnalysisHandler(tornado.web.RequestHandler):\n\n def post(self, model=None):\n '''\n This function takes a request and calculates\n the click through rate base on that data, as well\n as validates the data structure of request and response.\n It returns a list of objects in following formats:\n\n Request parameters:\n\n startTimestamp : mandatory parameter (in the format of '2016-01-03 13:55:00'\n endTimestamp : mandatory parameter in the format of '2016-01-04 13:55:00'\n aggregation : mandatory parameter, the interval aggregation in minutes\n product : optional parameter as a string\n platform : optional parameter as a string\n\n Response parameters:\n\n timestamp : initial timestamp of each aggregation\n platform : platform as explained above\n product : product as explained above\n CTR : metric calculated as the #purchases / #productViews\n '''\n\n data = json.loads(self.request.body.decode('utf-8'))\n\n try:\n request_model = AggregationRequest(data)\n request_model.validate()\n except:\n raise \"400\"\n\n try:\n ctr_calculator = CTR_Calculater(data)\n response = {\"ctr_response\": ctr_calculator.calculate_ctr()}\n response = json.dumps(response)\n self.write(response)\n except:\n raise \"500\"\n" }, { "alpha_fraction": 0.5439229607582092, "alphanum_fraction": 0.5469313859939575, "avg_line_length": 38.57143020629883, "blob_id": "76329227ef4a24d994c87fa6437977d675e60d28", "content_id": "6ce6cd59a2420366a1371b23219e1679bfb16aca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1662, "license_type": "no_license", "max_line_length": 86, "num_lines": 42, "path": "/connectors/s3.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "from data.productevent import productevents\nfrom helper.helper import *\nimport pandas as pd\n\nclass S3Handler:\n def __init__(self):\n pass\n\n def get_event_df(self, product, unix_start_timestamp, unix_end_timestamp):\n\n events = [i for i in productevents\n if self.get_event_condition(\n i, unix_start_timestamp,\n unix_end_timestamp)]\n\n product_list = []\n for e in events:\n for v in e['events']:\n code_color = v['data']['custom_attributes']['codeColor']\n if (product and\n code_color == product):\n product_list.append((e[\"timestamp_unixtime_ms\"],\n code_color))\n else:\n product_list.append((e[\"timestamp_unixtime_ms\"],\n code_color))\n\n cols = [\"timestamp_unixtime_ms\", 'code_color']\n df_events = pd.DataFrame(product_list, columns=cols)\n\n df_events['timestamp_unixtime_ms'] = df_events['timestamp_unixtime_ms'] / 1000\n df_events['timestamp_unixtime_ms'] = df_events['timestamp_unixtime_ms'].apply(\n (lambda x: convert_tz(\n x, \"Europe/London\", 
\"America/Sao_Paulo\"))) \n df_events['timestamp_unixtime_ms'] = \\\n pd.to_datetime(df_events['timestamp_unixtime_ms'], unit='ms')\n\n return df_events\n\n def get_event_condition(self, i, unix_start_timestamp, unix_end_timestamp):\n return (i['timestamp_unixtime_ms'] >= unix_start_timestamp and\n i['timestamp_unixtime_ms'] <= unix_end_timestamp)\n" }, { "alpha_fraction": 0.5684713125228882, "alphanum_fraction": 0.6210191249847412, "avg_line_length": 14.292682647705078, "blob_id": "e1963f729c6561341129097a559fdb334a617cc7", "content_id": "2b7ebe0349cb401551235ed17b37fb9d057561ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 628, "license_type": "no_license", "max_line_length": 135, "num_lines": 41, "path": "/README.md", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "CRT calculator\n=============================\n\nBackend for the CRT calculator\n\nDependencies\n------------\n\n* Python >= 3.5, virtualenv\n* Make\n* curl\n\nBuilding\n--------\n\nTo **build** the project run:\n\nmake\n\nStart API\n--------\n\nTo **start** the API run:\n\nmake start\n \nExample curl\n--------\n\nTo get a response use the example curl in another terminal while API is running:\n\ncurl -X POST localhost/crt/v2 -d '{\"startTimestamp\" : \"2016-01-03 13:55:00\",\"endTimestamp\" : \"2019-01-04 13:55:00\",\"aggregation\" : 60}'\n\n\nTesting\n-------\n\nThe **unit tests** live in the main directory.\nTo run the unit tests of the project run:\n\nmake test\n\n" }, { "alpha_fraction": 0.5523724555969238, "alphanum_fraction": 0.5532676577568054, "avg_line_length": 30.91428565979004, "blob_id": "b0556f3809c254c375824a2186f59c0a1f234ce7", "content_id": "98e49040a929c15e23d18accf299a34d00bffd07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 77, "num_lines": 35, "path": "/connectors/db_creator.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "import sqlite3 as sqlite\nimport pandas as pd\n\nclass DataBaseCreator:\n\n def __init__(self, database_name):\n self.connection = sqlite.connect(database_name)\n self.cur = self.connection.cursor()\n\n def create_database(self, table_names):\n\n for table in table_names:\n self.create_table(table)\n print('database created')\n self.connection.close()\n\n def create_table(self, tablename):\n df_orders = self.read_csv('data/' + tablename + '.csv')\n if 'order_date' in df_orders.columns.values:\n df_orders['order_date'] = pd.to_datetime(df_orders['order_date'],\n format='%d/%m/%Y %H:%M')\n try:\n df_orders.to_sql(tablename, self.connection)\n\n except:\n pass\n\n def read_csv(self, file_name):\n return pd.read_csv(file_name, sep=';',\n index_col=None,\n infer_datetime_format=True)\n\nif __name__ == '__main__':\n db = DataBaseCreator('connectors/orders.db')\n db.create_database(['orders', 'order_items'])\n" }, { "alpha_fraction": 0.5524275898933411, "alphanum_fraction": 0.5538555979728699, "avg_line_length": 39.849998474121094, "blob_id": "2a03cc65dd828e78b74229279edc13381950ee17", "content_id": "c4bf06295cf9d8c4d4ec7953dfa3b84760959e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4912, "license_type": "no_license", "max_line_length": 99, "num_lines": 120, "path": "/core/ctr.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom 
datetime import timedelta\nimport pandas as pd\nfrom connectors.db import DataBaseHandler\nfrom connectors.s3 import S3Handler\nfrom helper.helper import *\n\n\nclass CTR_Calculater:\n def __init__(self, model):\n print(\"model init\", model)\n\n # initialize and transform request parameters\n self.start_timestamp = model['startTimestamp']\n self.end_timestamp = model['endTimestamp']\n\n self.unix_start_timestamp = convert_tz(\n datetime.strptime(\n self.start_timestamp,\n '%Y-%m-%d %H:%M:%S').timestamp(),\n \"America/Sao_Paulo\", \"Europe/London\")\n self.unix_end_timestamp = convert_tz(\n datetime.strptime(\n self.end_timestamp,\n '%Y-%m-%d %H:%M:%S').timestamp(),\n \"America/Sao_Paulo\", \"Europe/London\")\n self.aggregation = model['aggregation']\n self.product = None\n if 'product' in model:\n self.product = model['product']\n self.platform = None\n if 'platform' in model:\n self.platform = model['platform']\n\n # initialize aggregation dataframe to purchase and events \n self.df_aggregation = pd.DataFrame()\n self.df_aggregation['startTimestamp'] = \\\n [dt for dt in datetime_range(\n datetime.strptime(\n self.start_timestamp,\n '%Y-%m-%d %H:%M:%S'),\n datetime.strptime(\n self.end_timestamp,\n '%Y-%m-%d %H:%M:%S'),\n timedelta(minutes=self.aggregation))]\n self.df_aggregation['startTimestamp_asof'] = \\\n pd.to_datetime(self.df_aggregation['startTimestamp'])\n self.df_aggregation.index = \\\n self.df_aggregation['startTimestamp_asof']\n\n # initialize order dataframe\n db = DataBaseHandler('connectors/orders.db')\n self.df_oders = db.get_purchase_df(\n self.start_timestamp, self.end_timestamp,\n self.product, self.platform)\n\n # initialize event dataframe\n s3_con = S3Handler()\n self.df_events = s3_con.get_event_df(\n self.product,\n self.unix_start_timestamp,\n self.unix_end_timestamp)\n\n # merge dataframe with aggregation dataframe\n self.df_event_itervals = self.merge_by_time_interval(\n self.df_events, self.df_aggregation,\n 'timestamp_unixtime_ms', self.aggregation)\n self.df_oders_itervals = self.merge_by_time_interval(\n self.df_oders, self.df_aggregation,\n 'order_date', self.aggregation)\n\n def calculate_ctr(self):\n a = self.df_event_itervals.startTimestamp.unique()\n b = self.df_oders_itervals.startTimestamp.unique()\n iterator = list(set(a) & set(b))\n\n result = []\n for agg_time in iterator:\n oder_filter = (self.df_oders_itervals.startTimestamp == agg_time)\n unique_products = self.df_oders_itervals[oder_filter].code_color.tolist()\n\n event_filter = (self.df_event_itervals.startTimestamp == agg_time)\n event_iterator = self.df_event_itervals[event_filter].groupby('code_color')\n for product_key, product_value in event_iterator:\n if product_key in unique_products:\n p = self.df_oders_itervals[(self.df_oders_itervals.code_color == product_key)\n & (self.df_oders_itervals.startTimestamp == agg_time)]\n for platform_key, platform_value in p.groupby('device_type'):\n no_p = len(platform_value)\n no_v = len(product_value)\n ctr = no_p / no_v\n result.append({\n \"startTimestamp\": str(agg_time)[:10],\n \"platform\": platform_key,\n \"product\": product_key,\n \"ctr\": ctr})\n return result\n\n def merge_by_time_interval(self, df, df_aggregation, data_col, aggregation):\n \"\"\"\n Function performs an asof merge, as we match on “forward” search\n selects the first row in the aggregation dataframe whose ‘on’ key\n is greater than or equal to the left’s key.\n \"\"\"\n\n if data_col in df.columns.values:\n df['startTimestamp_asof'] = pd.to_datetime(\n df[data_col])\n 
else:\n return df\n\n df = df.sort_values(by='startTimestamp_asof')\n df.index = df['startTimestamp_asof']\n tol = pd.Timedelta(aggregation, unit='m')\n\n return pd.merge_asof(left=df,\n right=df_aggregation,\n on='startTimestamp_asof',\n direction='backward',\n tolerance=tol)\n" }, { "alpha_fraction": 0.46390804648399353, "alphanum_fraction": 0.4662069082260132, "avg_line_length": 34.655738830566406, "blob_id": "e1940c12abd48b3bd6adf9d6061c88805653fd94", "content_id": "d07e29f2303ede362faa276d5697cb0f7eb352c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2175, "license_type": "no_license", "max_line_length": 70, "num_lines": 61, "path": "/connectors/db.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "import sqlite3 as sqlite\nfrom datetime import datetime\nimport pandas as pd\nfrom datetime import timedelta\n\nclass DataBaseHandler:\n def __init__(self, database_name):\n self.database = database_name\n self.connection = self._get_connection()\n self.cur = self.connection.cursor()\n\n def _get_connection(self):\n return sqlite.connect(self.database)\n\n def close_connection(self):\n self.connection.close()\n\n def get_purchase_df(self, start, end, product, platform):\n\n sql_query = self.get_purchase_query(\n start, end, product, platform)\n self.cur.execute(sql_query)\n rows = self.cur.fetchall()\n cols = [description[0]\n for description\n in self.cur.description]\n self.connection.close()\n\n df_oders = pd.DataFrame(rows, columns=cols)\n df_oders[\"order_date\"] = [datetime.strptime(\n date, '%Y-%m-%d %H:%M:%S') +\n timedelta(days=700)\n for date in df_oders.order_date]\n return df_oders\n\n def get_purchase_query(self, start, end, product, platform):\n sql_query = \"\"\"\n SELECT\n orders.order_date AS \"order_date\",\n orders.device_type AS \"device_type\",\n order_items.code_color AS \"code_color\",\n orders.id AS \"order_id\"\n FROM\n orders\n INNER JOIN\n order_items\n ON\n orders.id = order_items.order_id\n WHERE\n order_date >= strftime('%s')\n AND order_date < strftime('%s')\n \"\"\" % (start, end)\n\n if product:\n sql_query += ' AND order_items.code_color == \"' + \\\n product + '\"'\n\n if platform:\n sql_query += ' AND orders.device_type == \"' + \\\n platform + '\"'\n return sql_query\n" }, { "alpha_fraction": 0.4244832992553711, "alphanum_fraction": 0.4944356083869934, "avg_line_length": 38.3125, "blob_id": "8c653209d2e00113a5268627d415335a00b1f788", "content_id": "3c635cb1540c59f77ebf1ed75bc6f75b1109bce4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 630, "license_type": "no_license", "max_line_length": 71, "num_lines": 16, "path": "/test_db.py", "repo_name": "michaeltriska/ctr_amaro", "src_encoding": "UTF-8", "text": "import unittest\nfrom connectors.db import DataBaseHandler\n\nclass DataBaseTests(unittest.TestCase):\n\n def test_database(self):\n db = DataBaseHandler('connectors/orders.db')\n rows = db.cur.execute(\"SELECT * FROM orders\")\n assert rows.fetchall()[0] == (0, 144296, 16423318,\n '2016-02-01 00:11:00', 234.8, 0.0,\n 234.8, 'DELIVERED', 'CREDIT CARD',\n 15.39, 'Correios PAC', 'Brasília',\n 'DF', 'google / organic', 'iOS')\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
11
ARiSE-Lab/Patch-as-translation
https://github.com/ARiSE-Lab/Patch-as-translation
d429acbcbfac9813f4703c1a37832435ea0dce98
e2e45a5f9659f2ad03316b9f5cf93adde6a278d4
cd39e578efa84bc05405f572a1d7f8b75cba4fb7
refs/heads/master
2022-12-12T12:11:46.306847
2020-08-30T03:04:34
2020-08-30T03:04:34
289,759,783
9
4
null
null
null
null
null
[ { "alpha_fraction": 0.703172504901886, "alphanum_fraction": 0.71438068151474, "avg_line_length": 55.91351318359375, "blob_id": "38d81f65e1b4b2da641dfca7b597b287e132b987", "content_id": "bd5f977bfc1b81bfb809f1c602497c5606a020fa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10528, "license_type": "permissive", "max_line_length": 361, "num_lines": 185, "path": "/Model/transformer.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\n\nclass AttentionLayer(tf.keras.layers.Layer):\n\tdef __init__(self, attention_dim, num_heads=None, hidden_dim=None, bias_dim=None):\n\t\tsuper(AttentionLayer, self).__init__()\n\t\tif hidden_dim is None: hidden_dim = attention_dim\n\t\tself.attention_dim = attention_dim\n\t\tself.hidden_dim = hidden_dim\n\t\tself.num_heads = 1 if num_heads is None else num_heads\n\t\tself.attention_dim_per_head = self.attention_dim // self.num_heads\n\t\tself.bias_dim = bias_dim\n\t\n\tdef build(self, _):\n\t\tself.attn_query = self.add_weight(name='q', shape=(self.hidden_dim, self.num_heads, self.attention_dim_per_head), initializer=\"glorot_uniform\")\n\t\tself.attn_keys = self.add_weight(name='k', shape=(self.hidden_dim, self.num_heads, self.attention_dim_per_head), initializer=\"glorot_uniform\")\n\t\tself.attn_values = self.add_weight(name='v', shape=(self.hidden_dim, self.num_heads, self.attention_dim_per_head), initializer=\"glorot_uniform\")\n\t\tself.weight_out = self.add_weight(name='o', shape=(self.num_heads, self.attention_dim_per_head, self.hidden_dim), initializer=\"glorot_uniform\")\n\t\tif self.bias_dim is not None:\n\t\t\tself.bias_embs = self.add_weight(name='e1', shape=(self.bias_dim, self.attention_dim_per_head), initializer=\"glorot_uniform\")\n\t\t\tself.bias_scalar = self.add_weight(name='e2', shape=(self.attention_dim_per_head, 1), initializer=\"glorot_uniform\")\n\t\n\tdef call(self, states, masks, attention_bias):\n\t\tif len(states) == 2:\n\t\t\tstates, key_states = states\n\t\telse:\n\t\t\tstates, key_states = states[0], states[0]\n\t\treturn self.call_internal(states, key_states, masks, attention_bias)\n\t\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, 4), dtype=tf.int32)])\n\tdef call_internal(self, states, key_states, masks, attention_bias):\n\t\t# Compute key, query and value vectors, reshaped to [Batch, Heads, Time, Dim] where Dim is attention_dim//num_heads\n\t\tquery, keys, values = self.compute_qkv(states, key_states)\n\t\t\n\t\t# Compute attention weights, and context from these\n\t\talpha = self.get_attention_weights(query, keys, masks, attention_bias)\n\t\tcontext = tf.einsum('bhqk,bkha->bqha', alpha, values)\n\t\tcontext = tf.einsum('btha,had->btd', context, self.weight_out)\n\t\treturn context\n\t\n\t# Compute key, query and value vectors. 
If separate key_states are provided, attend over the input instead and thus assume attention is not masked\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None), dtype=tf.float32)])\n\tdef compute_qkv(self, states, key_states):\n\t\tquery = tf.einsum('btd,dha->btha', states, self.attn_query) # Queries are always computed on states\n\t\tkeys = tf.einsum('btd,dha->btha', states if key_states is None else key_states, self.attn_keys)\n\t\tvalues = tf.einsum('btd,dha->btha', states if key_states is None else key_states, self.attn_values)\n\t\treturn query, keys, values\n\t\n\t# Compute attention weights from cross-product between keys and queries (scaled, masked, softmaxed)\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, 4), dtype=tf.int32)])\n\tdef get_attention_weights(self, query, keys, masks, attention_bias):\n\t\talpha = tf.einsum('bkha,bqha->bhqk', keys, query)\n\t\tif self.bias_dim is not None:\n\t\t\tkeys = tf.reduce_sum(keys, -1) # bkh\n\t\t\tbias = tf.matmul(tf.one_hot(attention_bias[:, -1], self.bias_dim), self.bias_embs) # bqka\n\t\t\tbias = tf.squeeze(tf.matmul(bias, self.bias_scalar), -1) # bqk\n\t\t\tbias_shape = tf.shape(alpha)\n\t\t\tbias_shape = tf.stack([bias_shape[0], bias_shape[2], bias_shape[3]])\n\t\t\tbias = tf.scatter_nd(attention_bias[:, :-1], bias, bias_shape)\n\t\t\tbias = tf.einsum('bqk,bkh->bhqk', bias, keys)\n\t\t\talpha += bias\n\t\talpha *= tf.math.rsqrt(tf.cast(self.attention_dim_per_head, \"float32\"))\n\t\tif masks is not None:\n\t\t\talpha = alpha * masks + (1.0 - tf.math.ceil(masks)) * tf.float32.min\n\t\talpha = tf.nn.softmax(alpha)\n\t\tif masks is not None:\n\t\t\talpha *= masks\n\t\treturn alpha\n\nclass Transformer(tf.keras.layers.Layer):\n\tdef __init__(self, model_config, vocab_dim, shared_embedding=None, bias_dim=None):\n\t\tsuper(Transformer, self).__init__()\n\t\tself.embed_dim = model_config[\"embed_dim\"]\n\t\tself.hidden_dim = model_config[\"hidden_dim\"]\n\t\tself.ff_dim = model_config[\"ff_dim\"]\n\t\tself.attention_dim = model_config[\"attention_dim\"]\n\t\tself.num_layers = model_config[\"num_layers\"]\n\t\tself.num_heads = model_config[\"num_heads\"]\n\t\tself.dropout_rate = model_config[\"dropout_rate\"]\n\t\tself.bias_dim = bias_dim\n\t\t\n\t\tself.vocab_dim = vocab_dim\n\t\tself.embed = shared_embedding\n\t\tself.pos_enc = tf.constant(positional_encoding(model_config[\"embed_dim\"], 2000))\n\t\n\tdef build(self, _):\n\t\t# Set up embedding and multi-headed attention layers\n\t\tif self.embed is None:\n\t\t\trandom_init = tf.random_normal_initializer(stddev=self.hidden_dim ** -0.5)\n\t\t\tself.embed = tf.Variable(random_init([self.vocab_dim, self.embed_dim]), dtype=tf.float32)\n\t\t\n\t\tmake_att = lambda : AttentionLayer(self.attention_dim, self.num_heads, self.hidden_dim, self.bias_dim)\n\t\tself.attention = [make_att() for _ in range(self.num_layers)]#make_att_deprecated\n\t\tself.enc_attention = [make_att() for _ in range(self.num_layers)]\n\t\t\n\t\t# Layer normalization for every residual layer\n\t\tself.ln = [[tf.keras.layers.LayerNormalization() for _ in range(3)] for _ in range(self.num_layers)]\n\t\tself.ln_out = tf.keras.layers.LayerNormalization()\n\t\t\n\t\t# Two-layer feed-forward with wide layer in the middle\n\t\tself.ff_1 = 
[tf.keras.layers.Dense(self.ff_dim, activation=\"relu\") for _ in range(self.num_layers)]\n\t\tself.ff_2 = [tf.keras.layers.Dense(self.hidden_dim) for _ in range(self.num_layers)]\n\t\n\t\"\"\"Transformer language model: converts indices into hidden states through 6 layers of multi-headed attention\n\t\tTo generate language from the resulting states, pass the states to \"predict\". Note that predict assumes input vocabulary is output vocabulary.\n\t\n\tArgs:\n\t\tmask: if not None, used to mask tokens e.g. \"future\" tokens. See \"get_sequence_mask\" to get a mask specifically for this purpose\n\t\tenc_states: If not None, applies both self-attention and input attention. In that case, we never mask attention -- encoded states are assumed to be fully known\n\t\"\"\"\n\tdef call(self, inputs, masks, training, attention_bias=None):\n\t\tis_enc_dec = len(inputs) == 2\n\t\tif is_enc_dec:\n\t\t\tinputs, key_states = inputs\n\t\t\tmasks, key_masks = masks\n\t\telse:\n\t\t\tinputs = inputs[0]\n\t\t\tmasks = masks[0]\n\t\tif attention_bias is None: attention_bias = tf.zeros((0,4), dtype='int32')\n\t\tif is_enc_dec:\n\t\t\treturn self.enc_dec_attention(inputs, masks, key_states, key_masks, attention_bias, training)\n\t\telse:\n\t\t\treturn self.self_attention(inputs, masks, attention_bias, training)\n\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32)])\n\tdef embed_inputs(self, inputs):\n\t\tstates = tf.nn.embedding_lookup(self.embed, inputs)\n\t\tstates *= tf.math.sqrt(tf.cast(tf.shape(states)[-1], \"float32\"))\n\t\tstates += self.pos_enc[:tf.shape(states)[1]]\n\t\treturn states\n\t\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32), tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, 4), dtype=tf.int32), tf.TensorSpec(shape=None, dtype=tf.bool)])\n\tdef self_attention(self, inputs, masks, attention_bias, training):\n\t\tstates = self.embed_inputs(inputs)\n\t\tfor ix in range(self.num_layers):\n\t\t\tnew_states = (self.ln[ix][0](states),)\n\t\t\tnew_states = self.attention[ix](new_states, masks, attention_bias)\n\t\t\tif training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tstates += new_states\n\t\t\t\n\t\t\tnew_states = self.ff_1[ix](self.ln[ix][1](states))\n\t\t\tif training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tnew_states = self.ff_2[ix](new_states)\n\t\t\tif training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tstates += new_states\n\t\treturn self.ln_out(states)\n\t\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32), tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, None, None, None), dtype=tf.float32), tf.TensorSpec(shape=(None, 4), dtype=tf.int32), tf.TensorSpec(shape=None, dtype=tf.bool)])\n\tdef enc_dec_attention(self, inputs, masks, key_states, key_masks, attention_bias, training):\n\t\tstates = self.embed_inputs(inputs)\n\t\tfor ix in range(self.num_layers):\n\t\t\tnew_states = (self.ln[ix][0](states),)\n\t\t\tnew_states = self.attention[ix](new_states, masks, tf.zeros((0,4), dtype='int32'))\n\t\t\tif training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tstates += new_states\n\n\t\t\tnew_states = self.ln[ix][2](states)\n\t\t\tnew_states = self.enc_attention[ix]((new_states, key_states), key_masks, attention_bias)\n\t\t\tif 
training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tstates += new_states\n\t\t\t\n\t\t\tnew_states = self.ff_1[ix](self.ln[ix][1](states))\n\t\t\tif training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tnew_states = self.ff_2[ix](new_states)\n\t\t\tif training: new_states = tf.nn.dropout(new_states, rate=self.dropout_rate)\n\t\t\tstates += new_states\n\t\treturn self.ln_out(states)\n\t\n\t\"\"\"Returns a sequence mask in which each token can only see states up to its own position. Useful for generative language modeling (e.g. decoding).\"\"\"\n\tdef get_sequence_mask(self, seq_len):\n\t\treturn tf.sequence_mask(lengths=tf.range(1, seq_len + 1), maxlen=seq_len, dtype=tf.float32)\n\t\n\t\"\"\"Generates tokens from transformer states using the transposed embedding layer\"\"\"\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None, None), dtype=tf.float32)])\n\tdef predict(self, states):\n\t\treturn tf.matmul(states, self.embed, transpose_b=True)\n\n\n# Based on https://github.com/DongjunLee/transformer-tensorflow/blob/master/transformer/attention.py\ndef positional_encoding(dim, sentence_length, dtype=tf.float32):\n\tencoded_vec = np.array([pos/np.power(10000, 2*i/dim) for pos in range(sentence_length) for i in range(dim)])\n\tencoded_vec[::2] = np.sin(encoded_vec[::2])\n\tencoded_vec[1::2] = np.cos(encoded_vec[1::2])\n\tpos_enc = tf.constant(encoded_vec.reshape([sentence_length, dim]), dtype=dtype)\n\treturn pos_enc" }, { "alpha_fraction": 0.5171805024147034, "alphanum_fraction": 0.5309088230133057, "avg_line_length": 32.48118209838867, "blob_id": "d82bb1fc627592de3e24021de19b4354415ee576", "content_id": "0013c1f4dd2324e47bbb982a192c64232b3300ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12456, "license_type": "permissive", "max_line_length": 117, "num_lines": 372, "path": "/Analysis/code/util.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport csv\nimport glob\nimport shutil\nimport javalang\nimport numpy as np\nimport nltk\nfrom nltk import ngrams\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.ticker import PercentFormatter\n\nplt.rcParams.update({'font.size': 20})\n\ndef heatMapFromCSV(csvFile, buggyKey, patchKey, metric):\n realDataB = list()\n realDataP = list()\n with open(csvFile, 'r') as csvf:\n reader = csv.DictReader(csvf, delimiter='\\t')\n for r in reader:\n try:\n realDataB.append(float(r[buggyKey]))\n realDataP.append(float(r[patchKey]))\n except:\n print(r)\n continue\n x = np.array(realDataB)\n y = np.array(realDataP)\n\n heatmap, xedges, yedges = np.histogram2d(x, y, bins=[10, 10])\n\n extent = [0, 1, 0, 1]\n\n plt.xlabel('Buggy ' + metric)\n plt.ylabel('Patch ' + metric)\n #plt.title('Ave-' + metric)\n\n #plt.clf()\n plt.imshow(heatmap.T, extent=extent, norm=mpl.colors.LogNorm(), cmap='inferno', origin='lower')\n\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('Counts')\n plt.show()\n\n\ndef hist(data, bins, color, weights, title, xt=None):\n n, bins, patches = plt.hist(data, bins=bins, rwidth=0.85, color=color, weights=weights)\n plt.gca().yaxis.set_major_formatter(PercentFormatter(1))\n plt.ylabel('Percentage')\n if xt is not None:\n plt.xticks(xt)\n plt.title(title)\n print(n, bins, patches)\n plt.show()\n\n\ndef get_jaccard_sim(str1, str2):\n a = set(str1.split())\n b = set(str2.split())\n c = a.intersection(b)\n return 
float(len(c)) / (len(a) + len(b) - len(c))\n\n\ndef jaccard_ngram(str1, str2):\n lista = str1.strip().split()\n listb = str2.strip().split()\n jaccardList = list()\n for i in range(1, 5):\n a = set(ngrams(lista, i))\n b = set(ngrams(listb, i))\n c = a.intersection(b)\n if (len(a) + len(b) - len(c)) > 0:\n \ttmp = float(len(c)) / (len(a) + len(b) - len(c))\n else:\n \ttmp = 0.0\n jaccardList.append(tmp)\n return sum(jaccardList) / len(jaccardList)\n\n\ndef calculate_edit_distance(org_code, cand_code):\n \"\"\"\n\tThe higher the score, the lower the similarity.\n\tPay attention to \\n symbol in the line\n\t\"\"\"\n org_parts = [part.strip() for part in org_code.strip().split()]\n cand_parts = [part.strip() for part in cand_code.strip().split()]\n\n def levenshteinDistance(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]\n\n return levenshteinDistance(org_parts, cand_parts)\n pass\n\n\ndef bleuScore(reference, hypothesis):\n # bAve = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis)\n b1 = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights=(1, 0, 0, 0))\n if b1 < 0.01:\n b1 = 0\n b2 = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights=(0, 1, 0, 0))\n if b2 < 0.01:\n b2 = 0\n b3 = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights=(0, 0, 1, 0))\n if b3 < 0.01:\n b3 = 0\n b4 = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights=(0, 0, 0, 1))\n if b4 < 0.01:\n b4 = 0\n b = nltk.translate.bleu_score.sentence_bleu([reference], hypothesis, weights=(0.25, 0.25, 0.25, 0.25))\n if b < 0.01:\n b = 0\n return [b1, b2, b3, b4, b]\n\n\ndef Nminelements(list1, N, skip=True):\n # list1: [(score, lineNo)...]\n final_list = []\n\n for i in range(0, N):\n min1 = (float('Inf'), -1)\n\n for j in range(len(list1)):\n if list1[j][0] < min1[0]:\n min1 = list1[j]\n\n list1.remove(min1)\n if skip:\n # skip the first min because that is buggyline itself.\n if (i > 0):\n final_list.append(min1)\n else:\n final_list.append(min1)\n\n return final_list\n\n\ndef dataSampling():\n heldOut = list()\n test = list()\n with open(\"DataSplit/heldout_keys.txt\", 'r') as h:\n for key in h.readlines():\n k = key.strip()\n heldOut.append(k)\n\n with open(\"DataSplit/test_keys.txt\", 'r') as t:\n for key in t.readlines():\n k = key.strip()\n test.append(k)\n\n heldOut = set(heldOut)\n test = set(test)\n\n for file in glob.glob(\"Data/*.csv\")[1001:1501]:\n key = file.strip(\"Data/\").strip(\".csv\").replace('__', '/')\n if key not in heldOut and key not in test:\n # shutil.copy(file, \"SampledData/\")\n shutil.copy(file, \"SampledHeldOut/\")\n\n\ndef BPEinput(pattern, trainSize):\n buggyPrefix = \"Files/Files-pre\"\n inputFile = str(trainSize) + \"_BPEinput.txt\"\n with open(inputFile, 'w', encoding=\"ISO-8859-1\") as inpf:\n for file in glob.glob(pattern)[:trainSize]:\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n if row[\"is_fix\"] == \"True\" and row[\"lines_removed\"] == '1' and row[\"lines_added\"] == '1' and row[\n \"line_rm_start\"] == row[\"line_add_start\"]:\n organization = row[\"organization\"]\n project = row[\"project\"]\n commit = row[\"commit\"]\n fileName = 
row[\"file\"].replace(\"/\", \"__\")\n filePath = '#'.join([row[\"organization\"], row[\"project\"], row[\"commit\"], fileName])\n try:\n with open(os.path.join(buggyPrefix, filePath), 'r', encoding='ISO-8859-1') as bf:\n codeLines = bf.readlines()\n for cl in codeLines:\n if cl.strip() != \"\":\n cl = cl.replace('\\t', ' ')\n inpf.write(cl)\n except:\n continue\n\n\ndef tokenizeLine(line):\n token_string = \"\"\n try:\n tokens = list(javalang.tokenizer.tokenize(line))\n for token in tokens:\n v = token.value\n token_string = token_string + v + \"\\t\"\n except Exception as e:\n print(\"This Line cannot be parsed: \\n\" + line + \"\\n\")\n return token_string\n\n\ndef tokenize(file):\n f = open(file, \"r\", encoding='ISO-8859-1')\n file_lines = f.readlines()\n\n tokens_string = \"\"\n for line in file_lines:\n if line.strip().startswith('*') or \\\n line.strip().startswith('/*') or \\\n line.strip().startswith('//') or \\\n line.strip().startswith('*') or \\\n line.strip().startswith('/*') or \\\n line.strip().startswith('//') or \\\n line.strip().endswith('*/'):\n continue\n try:\n tokens = list(javalang.tokenizer.tokenize(line))\n for token in tokens:\n if isinstance(token, javalang.tokenizer.String):\n v = '\"str\"'\n else:\n v = token.value\n tokens_string = tokens_string + v + \"\\t\"\n except Exception as e:\n # print (\"A line cannot be parsed\")\n # sys.stderr.write(\"This Line cannot be parsed: \\n\" + line + \"\\n\")\n continue\n\n f.close()\n\n return tokens_string\n\n\ndef focusIdentifier(line):\n # focus on the change of identifiers\n tokens_string = \"\"\n try:\n tokens = list(javalang.tokenizer.tokenize(line))\n for token in tokens:\n if isinstance(token, javalang.tokenizer.Identifier):\n tokens_string = tokens_string + token.value + \" \"\n\n except Exception as e:\n sys.stderr.write(\"This Line cannot be parsed: \\n\" + line + \"\\n\")\n\n return tokens_string\n\n\ndef lexicalAnalysis(line):\n tokens_string = \"\"\n try:\n tokens = list(javalang.tokenizer.tokenize(line))\n\n for token in tokens:\n if isinstance(token, javalang.tokenizer.String):\n v = \"String\"\n elif isinstance(token, javalang.tokenizer.EndOfInput):\n v = \"EndOfInput\"\n elif isinstance(token, javalang.tokenizer.Keyword):\n # v = token.value\n v = \"Keyword\"\n elif isinstance(token, javalang.tokenizer.Modifier):\n # v = token.value\n v = \"Modifier\"\n elif isinstance(token, javalang.tokenizer.BasicType):\n # v = token.value\n v = \"BasicType\"\n elif isinstance(token, javalang.tokenizer.Literal):\n v = \"Literal\"\n elif isinstance(token, javalang.tokenizer.Integer):\n v = \"Integer\"\n elif isinstance(token, javalang.tokenizer.DecimalInteger):\n v = \"DecimalInteger\"\n elif isinstance(token, javalang.tokenizer.OctalInteger):\n v = \"OctalInteger\"\n elif isinstance(token, javalang.tokenizer.BinaryInteger):\n v = \"BinaryInteger\"\n elif isinstance(token, javalang.tokenizer.HexInteger):\n v = \"HexInteger\"\n elif isinstance(token, javalang.tokenizer.FloatingPoint):\n v = \"FloatingPoint\"\n elif isinstance(token, javalang.tokenizer.DecimalFloatingPoint):\n v = \"DecimalFloatingPoint\"\n elif isinstance(token, javalang.tokenizer.HexFloatingPoint):\n v = \"HexFloatingPoint\"\n elif isinstance(token, javalang.tokenizer.Boolean):\n # v = token.value\n v = \"Boolean\"\n elif isinstance(token, javalang.tokenizer.Character):\n v = \"Character\"\n elif isinstance(token, javalang.tokenizer.Null):\n v = \"Null\"\n elif isinstance(token, javalang.tokenizer.Separator):\n # v = v.value\n v = \"Separator\"\n elif 
isinstance(token, javalang.tokenizer.Operator):\n v = \"Operator\"\n elif isinstance(token, javalang.tokenizer.Annotation):\n v = \"Annotation\"\n elif isinstance(token, javalang.tokenizer.Identifier):\n v = \"Identifier\"\n else:\n print(\"Error\")\n print(type(token))\n\n tokens_string = tokens_string + v + \"\\t\"\n\n except Exception as e:\n sys.stderr.write(\"This Line cannot be parsed: \\n\" + line + \"\\n\")\n\n return tokens_string\n\n\ndef toJavaSourceCode(prediction):\n tokens = prediction.strip().split(\"\\t\")\n codeLine = \"\"\n delimiter = JavaDelimiter()\n for i in range(len(tokens)):\n if (i + 1 < len(tokens)):\n\n if (not isDelimiter(tokens[i])):\n if (not isDelimiter(tokens[i + 1])): # STR (i) + STR (i+1)\n codeLine = codeLine + tokens[i] + \" \"\n else: # STR(i) + DEL(i+1)\n codeLine = codeLine + tokens[i]\n else:\n if (tokens[i] == delimiter.varargs): # ... (i) + ANY (i+1)\n codeLine = codeLine + tokens[i] + \" \"\n elif (tokens[i] == delimiter.biggerThan): # > (i) + ANY(i+1)\n codeLine = codeLine + tokens[i] + \" \"\n elif (tokens[i] == delimiter.rightBrackets and i > 0):\n if (tokens[i - 1] == delimiter.leftBrackets): # [ (i-1) + ] (i)\n codeLine = codeLine + tokens[i] + \" \"\n else: # DEL not([) (i-1) + ] (i)\n codeLine = codeLine + tokens[i]\n else: # DEL not(... or ]) (i) + ANY\n codeLine = codeLine + tokens[i]\n else:\n codeLine = codeLine + tokens[i]\n return codeLine\n\n\ndef isDelimiter(token):\n return not token.upper().isupper()\n\n\nclass JavaDelimiter:\n @property\n def varargs(self):\n return \"...\"\n\n @property\n def rightBrackets(self):\n return \"]\"\n\n @property\n def leftBrackets(self):\n return \"[\"\n\n @property\n def biggerThan(self):\n return \">\"\n\n" }, { "alpha_fraction": 0.6261621117591858, "alphanum_fraction": 0.6492398381233215, "avg_line_length": 48.69565200805664, "blob_id": "0be49341c44d09ea4fed607cdcf9fba14c417a47", "content_id": "810ecff9b9eddadaf1ed836d9ce6d76175a0e728", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9143, "license_type": "permissive", "max_line_length": 277, "num_lines": 184, "path": "/Model/transformer_patching_model.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import math\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom transformer import Transformer\n\nclass TransformerPatchingModel(tf.keras.Model):\n\tdef __init__(self, model_config, vocab_dim, is_pointer=False):\n\t\tsuper(TransformerPatchingModel, self).__init__()\n\t\tself.vocab_dim = vocab_dim\n\t\tself.is_pointer = is_pointer\n\t\tself.transformer_enc = Transformer(model_config, vocab_dim)\n\t\tself.transformer_dec = Transformer(model_config, vocab_dim, shared_embedding=self.transformer_enc.embed, bias_dim=4)\n\t\tif self.is_pointer:\n\t\t\tself.pointer_pred = tf.keras.layers.Dense(2)\n\t\n\[email protected](input_signature=[tf.TensorSpec(shape=(None, None), dtype=tf.int32), tf.TensorSpec(shape=(None, None), dtype=tf.int32), tf.TensorSpec(shape=(None, None), dtype=tf.int32), tf.TensorSpec(shape=(None, None), dtype=tf.int32), tf.TensorSpec(shape=None, dtype=tf.bool)])\n\tdef call(self, pre_indices, pre_locs, post_indices, pointer_locs, training=True):\n\t\tbatch_dim = tf.shape(pre_indices)[0]\n\t\tpre_len = tf.shape(pre_indices)[-1]\n\t\tpost_len = tf.shape(post_indices)[-1]\n\n\t\tenc_mask = tf.cast(tf.clip_by_value(pre_indices, 0, 1), 'float32')\n\t\tenc_mask = tf.reshape(enc_mask, [batch_dim, 1, 1, pre_len])\n\t\tenc_state = 
self.transformer_enc((pre_indices,), (enc_mask,), training)\n\t\t\n\t\tpost_mask = tf.cast(tf.clip_by_value(post_indices, 0, 1), 'float32')\n\t\tpost_mask = tf.reshape(post_mask, [batch_dim, 1, post_len, 1])\n\t\tpost_seq_mask = self.transformer_dec.get_sequence_mask(post_len)\n\t\tpost_seq_mask = tf.reshape(post_seq_mask, [1, 1, post_len, post_len])\n\t\tpost_enc_mask = enc_mask * post_mask\n\t\t\n\t\tstart_end_mask = tf.cast(tf.sequence_mask(pre_locs[:, 1], pre_len), 'int32')\n\t\tstart_end_mask -= tf.cast(tf.sequence_mask(pre_locs[:, 0]-1, pre_len), 'int32')\n\n\t\tin_biases = tf.tile(tf.expand_dims(start_end_mask, 1), [1, post_len, 1])\n\t\tin_biases = tf.cast(tf.where(tf.greater(in_biases, 0)), 'int32')\n\t\tin_biases = tf.pad(in_biases, [[0, 0], [0, 1]])\n\t\t\n\t\t# If predicting edits, update input biases to specifically identify the start and end (edit) pointers\n\t\tif tf.shape(pointer_locs)[0] > 0:\n\t\t\tloc_bias = tf.stack([tf.range(batch_dim), pointer_locs[:, 0]], axis=1)\n\t\t\tloc_bias = tf.scatter_nd(loc_bias, tf.ones(batch_dim), tf.shape(start_end_mask))\n\t\t\tloc_bias = tf.tile(tf.expand_dims(loc_bias, 1), [1, post_len, 1])\n\t\t\tloc_bias = tf.cast(tf.where(tf.greater(loc_bias, 0)), 'int32')\n\t\t\tloc_bias = tf.concat([loc_bias, tf.fill((tf.shape(loc_bias)[0], 1), 2, 'int32')], axis=1)\n\t\t\t\n\t\t\trem_bias = tf.stack([tf.range(batch_dim), pointer_locs[:, 1]], axis=1)\n\t\t\trem_bias = tf.scatter_nd(rem_bias, tf.ones(batch_dim), tf.shape(start_end_mask))\n\t\t\trem_bias = tf.tile(tf.expand_dims(rem_bias, 1), [1, post_len, 1])\n\t\t\trem_bias = tf.cast(tf.where(tf.greater(rem_bias, 0)), 'int32')\n\t\t\trem_bias = tf.concat([rem_bias, tf.fill((tf.shape(rem_bias)[0], 1), 3, 'int32')], axis=1)\n\t\t\t\n\t\t\tin_biases = tf.concat([in_biases, loc_bias, rem_bias], axis=0)\n\t\t\n\t\tdec_state = self.transformer_dec((post_indices, enc_state), (post_seq_mask, enc_mask), training, in_biases)\n\t\tpreds = self.transformer_dec.predict(dec_state)\n\t\tif self.is_pointer:\n\t\t\tpointer_preds = self.pointer_pred(enc_state)\n\t\t\tpointer_preds += (1.0 - tf.expand_dims(tf.cast(start_end_mask, 'float32'), -1)) * tf.float32.min\n\t\t\tpointer_preds = tf.transpose(pointer_preds, [0, 2, 1])\n\t\t\treturn preds, pointer_preds\n\t\telse:\n\t\t\treturn preds\n\t\n\t# Beam searches patches given a bug\n\tdef predict(self, vocabulary, pre_indices, pre_locs, beam_size, max_expansions):\n\t\tbatch_dim = int(tf.shape(pre_indices)[0].numpy())\n\t\tpre_len = tf.shape(pre_indices)[-1]\n\t\t\n\t\tstart_end_mask = tf.cast(tf.sequence_mask(pre_locs[:, 1], pre_len), 'int32')\n\t\tstart_end_mask -= tf.cast(tf.sequence_mask(pre_locs[:, 0]-1, pre_len), 'int32')\n\n\t\t# Pre-compute encoder states for all buggy lines\n\t\tpre_mask = tf.cast(tf.clip_by_value(pre_indices, 0, 1), \"float32\")\n\t\tpre_mask = tf.reshape(pre_mask, [batch_dim, 1, 1, pre_len])\n\t\tpre_states = self.transformer_enc((pre_indices,), (pre_mask,), training=False)\n\t\t\n\t\tif self.is_pointer:\n\t\t\tpointer_preds = self.pointer_pred(pre_states)\n\t\t\tpointer_preds += (1.0 - tf.expand_dims(tf.cast(start_end_mask, 'float32'), -1)) * tf.float32.min\n\t\t\tpointer_preds = tf.transpose(pointer_preds, [0, 2, 1])\n\t\t\n\t\tpre_states = tf.split(pre_states, batch_dim)\n\t\tpre_mask = tf.split(pre_mask, batch_dim)\n\t\tstart_end_mask = tf.expand_dims(start_end_mask, 1)\n\t\tstart_end_mask = tf.split(start_end_mask, batch_dim)\n\t\t\n\t\tif self.is_pointer:\n\t\t\tper_side = math.ceil(math.sqrt(beam_size))\n\t\t\tprobs, ixes = 
tf.math.top_k(tf.nn.softmax(pointer_preds), k=per_side)\n\t\t\tprobs = tf.transpose(probs, [0, 2, 1]).numpy()\n\t\t\tixes = tf.transpose(ixes, [0, 2, 1]).numpy()\n\t\t\tresults = {}\n\t\t\tfor ix in range(batch_dim):\n\t\t\t\tresults[ix] = []\n\t\t\t\tfor i in range(per_side):\n\t\t\t\t\tfor j in range(per_side):\n\t\t\t\t\t\tpointers = [ixes[ix][i][0], ixes[ix][j][1]]\n\t\t\t\t\t\tentropy = -math.log2(probs[ix][i][0] * probs[ix][j][1] + 1e-7)\n\t\t\t\t\t\tsample = (entropy, False, pointers, [vocabulary.w2i[\"<s>\"]])\n\t\t\t\t\t\tresults[ix].append(sample)\n\t\t\t\t\t\tif len(results[ix]) == beam_size: break\n\t\t\t\t\tif len(results[ix]) == beam_size: break\n\t\telse:\n\t\t\tresults = {ix: [(0.0, False, [], [vocabulary.w2i[\"<s>\"]])] for ix in range(batch_dim)} # source_ix: (entropy, is_completed, indices)\n\t\t\n\t\tfor step in range(max_expansions):\n\t\t\t# Extract sequences to complete (those that aren't done yet)\n\t\t\tto_process = {ix: [r for r in res if not r[1]] for ix, res in results.items()}\n\t\t\tto_process = {ix: res for ix, res in to_process.items() if len(res) > 0}\n\t\t\tif len(to_process) == 0: break\n\t\t\t\n\t\t\tin_states = tf.concat([tf.tile(pre_states[ix], [len(to_proc), 1, 1]) for ix, to_proc in to_process.items()], axis=0)\n\t\t\tin_masks = tf.concat([tf.tile(pre_mask[ix], [len(to_proc), 1, 1, 1]) for ix, to_proc in to_process.items()], axis=0)\n\t\t\tin_start_end_mask = tf.concat([tf.tile(start_end_mask[ix], [len(to_proc), 1, 1]) for ix, to_proc in to_process.items()], axis=0)\n\t\t\tif self.is_pointer:\n\t\t\t\tin_pointer_locs = tf.stack([t[2] for to_proc in to_process.values() for t in to_proc], axis=0)\n\t\t\t\tbatch_indices = tf.range(sum(len(to_proc) for to_proc in to_process.values()))\n\t\t\t\n\t\t\t# Flatten input for easy access\n\t\t\tto_process = [(ix, *r) for ix, res in to_process.items() for r in res]\n\t\t\tpost_indices = tf.constant([inp[-1] for inp in to_process])\n\t\t\t\n\t\t\t# Run decoding transformer\n\t\t\tpost_masks = tf.ones_like(post_indices, dtype='float32')\n\t\t\tpost_masks = tf.expand_dims(tf.expand_dims(post_masks, 1), 1)\n\t\t\tin_biases = tf.tile(in_start_end_mask, [1, step + 1, 1])\n\t\t\tin_biases = tf.cast(tf.where(tf.greater(in_biases, 0)), 'int32')\n\t\t\tin_biases = tf.pad(in_biases, [[0, 0], [0, 1]])\n\t\t\tif self.is_pointer:\n\t\t\t\tin_start_end_mask = tf.squeeze(in_start_end_mask, 1)\n\t\t\t\tloc_bias = tf.stack([batch_indices, in_pointer_locs[:, 0]], axis=1)\n\t\t\t\tloc_bias = tf.scatter_nd(loc_bias, tf.ones(tf.shape(loc_bias)[0]), tf.shape(in_start_end_mask))\n\t\t\t\tloc_bias = tf.tile(tf.expand_dims(loc_bias, 1), [1, step + 1, 1])\n\t\t\t\tloc_bias = tf.cast(tf.where(tf.greater(loc_bias, 0)), 'int32')\n\t\t\t\tloc_bias = tf.concat([loc_bias, tf.fill((tf.shape(loc_bias)[0], 1), 2, 'int32')], axis=1)\n\t\t\t\t\n\t\t\t\trem_bias = tf.stack([batch_indices, in_pointer_locs[:, 1]], axis=1)\n\t\t\t\trem_bias = tf.scatter_nd(rem_bias, tf.ones(tf.shape(rem_bias)[0]), tf.shape(in_start_end_mask))\n\t\t\t\trem_bias = tf.tile(tf.expand_dims(rem_bias, 1), [1, step + 1, 1])\n\t\t\t\trem_bias = tf.cast(tf.where(tf.greater(rem_bias, 0)), 'int32')\n\t\t\t\trem_bias = tf.concat([rem_bias, tf.fill((tf.shape(rem_bias)[0], 1), 3, 'int32')], axis=1)\n\t\t\t\t\n\t\t\t\tin_biases = tf.concat([in_biases, loc_bias, rem_bias], axis=0)\n\t\t\t\n\t\t\tpost_states = self.transformer_dec((post_indices, in_states), (post_masks, in_masks,), False, in_biases)\n\t\t\tpost_states = post_states[:, -1:] # We are only interested in the last 
value\n\t\t\tpreds = self.transformer_dec.predict(post_states)\n\t\t\tpreds = tf.squeeze(preds, 1)\n\t\t\t\n\t\t\t# Get top-k predictions for each bug-fix\n\t\t\tprobs, ixes = tf.math.top_k(tf.nn.softmax(preds), k=max(25, beam_size))\n\t\t\tnew_results = {}\n\t\t\tprobs = probs.numpy()\n\t\t\tixes = ixes.numpy()\n\t\t\tnew_results = {}\n\t\t\tfor seq_ix in range(len(probs)):\n\t\t\t\told_patch = to_process[seq_ix]\n\t\t\t\tindex = old_patch[0]\n\t\t\t\tres = results[index]\n\t\t\t\tdel res[res.index(old_patch[1:])]\n\t\t\t\tif index not in new_results:\n\t\t\t\t\tnew_results[index] = []\n\t\t\t\tfor pred_ix in range(beam_size):\n\t\t\t\t\tprob = probs[seq_ix][pred_ix]\n\t\t\t\t\tpred = ixes[seq_ix][pred_ix]\n\t\t\t\t\tent = -math.log2(prob + 1e-7)\n\t\t\t\t\tis_done = vocabulary.i2w[pred] == \"</s>\"\n\t\t\t\t\tnew_results[index].append((old_patch[1] + ent, is_done, old_patch[3], list(old_patch[4])+[pred]))\n\t\t\tfor seq_ix in results.keys():\n\t\t\t\tif seq_ix not in new_results:\n\t\t\t\t\tcontinue\n\t\t\t\tresults[seq_ix].extend(new_results[seq_ix])\n\t\t\t\tresults[seq_ix] = sorted(results[seq_ix], key=lambda res: res[0])[:beam_size]\n\t\t\n\t\t# Clean up results before returning.\n\t\tfor ix in results.keys():\n\t\t\tresults[ix] = [(res[0], res[2], res[-1][1:]) for res in results[ix]]\n\t\t\tresults[ix] = sorted(results[ix], key=lambda res: res[0])\n\t\t\tresults[ix] = results[ix][:beam_size]\n\t\tresults = [results[ix] for ix in sorted(results.keys())]\n\t\treturn results" }, { "alpha_fraction": 0.6239272952079773, "alphanum_fraction": 0.6350328326225281, "avg_line_length": 33.73684310913086, "blob_id": "197057c81fbced0cbd0526cfa85926ed2d14cc79", "content_id": "c6c665f73bffef83bb765cc078087f7b0b7ba2cd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1981, "license_type": "permissive", "max_line_length": 161, "num_lines": 57, "path": "/Model/tracker.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import os\nimport time\n\nimport tensorflow as tf\n\nclass Tracker(object):\n\tdef __init__(self, model, model_path='models', log_path='log.txt', suffix=None):\n\t\tself.log_path = log_path\n\t\tself.model_path = model_path\n\t\tif suffix is not None:\n\t\t\tif self.log_path.endswith(\".txt\"):\n\t\t\t\tself.log_path = self.log_path[:-4] + \"-\" + suffix + \".txt\"\n\t\t\telse:\n\t\t\t\tself.log_path += \"-\" + suffix\n\t\t\tself.model_path += \"-\" + suffix\n\t\tself.ckpt = tf.train.Checkpoint(model=model, step=tf.Variable(0), samples=tf.Variable(0), time=tf.Variable(0.0))\n\t\tself.manager = tf.train.CheckpointManager(self.ckpt, self.model_path, max_to_keep=None)\n\t\t\n\t\tself.log = []\n\t\tif os.path.exists(self.log_path):\n\t\t\twith open(self.log_path) as f:\n\t\t\t\tfor l in f:\n\t\t\t\t\tl = l.rstrip().split(': ')\n\t\t\t\t\tscores = [float(v.replace('%', ''))/100 if '%' in v else v for v in l[1].split(',')]\n\t\t\t\t\tself.log.append((l[0], scores))\n\t\n\tdef restore(self, best_only=False):\n\t\tif self.manager.checkpoints:\n\t\t\tif best_only:\n\t\t\t\tbest = max(enumerate(self.log), key=lambda e: e[1][1][0])[0] # e[1][1] just selects the list of accuracies; the final index controls the specific value used.\n\t\t\t\tprint(\"Restoring top checkpoint:\", best + 1)\n\t\t\t\tstatus = self.ckpt.restore(self.manager.checkpoints[best])\n\t\t\telse:\n\t\t\t\tstatus = 
self.ckpt.restore(self.manager.latest_checkpoint)\n\t\t\tstatus.assert_existing_objects_matched()\n\t\t\tstatus.assert_consumed()\t\t\n\t\tself.time = time.time()\n\t\n\tdef update(self, model, scores):\n\t\tself.ckpt.step.assign_add(1)\n\t\tself.ckpt.time.assign_add(time.time() - self.time)\n\t\tself.time = time.time()\n\t\t\n\t\ts = self.ckpt.step.numpy()\n\t\tc = self.ckpt.samples.numpy()\n\t\tt = self.ckpt.time.numpy()\n\t\tself.log.append(((s, t), scores))\n\t\twith open(self.log_path, 'a') as f:\n\t\t\tf.write(str(s))\n\t\t\tf.write(', ')\n\t\t\tf.write(str(c))\n\t\t\tf.write(', ')\n\t\t\tf.write('{0:.3f}'.format(t))\n\t\t\tf.write(': ')\n\t\t\tf.write(', '.join([s if isinstance(s, str) else '{0:.2%}'.format(s) for s in scores]))\n\t\t\tf.write('\\n')\n\t\tself.manager.save()\n\n" }, { "alpha_fraction": 0.7751299738883972, "alphanum_fraction": 0.7768630981445312, "avg_line_length": 191.4166717529297, "blob_id": "19ef6966d710c43f2c3e7b65e0ab2a426c96d604", "content_id": "b6a3941b41c8532e8ffc03bc22fad2995fc129ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2308, "license_type": "permissive", "max_line_length": 690, "num_lines": 12, "path": "/Model/README.md", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "# Modeling Code\nThis folder contains the code to train and test our models. All hyper-parameter configurations can be set in `config.yml`, which currently contains the default settings used in the papers. You can set the context and/or edit enhancements there too, under `data` -> `add_context` and `edits`.\n\n## Training\nTo train our models from scratch, run `train_model.py` with the (provided) data file and vocabulary as arguments. This will run through the specified number of epochs (see `config.yml`) and validate the model's performance on held-out data every epoch, after which it writes both a log entry (in `log.txt`) and stores the latest model (under `models/`). Use the optional `-s|--suffix` flag to specify a descriptive name for the log and model locations, such as `-s edits-context`.\n\nDuring training, the model periodically (every `print_freq` minibatches, see `config.yml`) prints its metrics to track progress. Once every 5 print steps, it produces an example, which consists of the buggy line and the predicted (teacher-forced) repair tokens. When edit-based repairing, the buggy line is annotated with the real pointers (`^` for start and `$` for end) and the predicted ones (`>` and `<` respectively). When validating, the model additionally beam-searches on every sample and prints/logs the corresponding top-K accuracy at the end. It may be worthwhile to reduce the config's `beam_size` just for training (from the default 25 to e.g. 5) to speed up the held-out pass.\n\n## Testing\nTo generate the top-K (beam searched) patches for all test data, run `evaluate_model.py`, again with the same parameters. This runner creates an output file, named `results.txt`, again with optional suffix, with all the produced patches; it is particularly useful because it translates the model-produced edit (in terms of pointer and tokens) combined with the bug into the corresponding patch. For convenience, it prints the top-generated patch to console while producing patches.\n\nThe results file is formatted as follows: each bug is preceded by a whiteline, followed by the tab-separated tokens of that bug. 
Then, the `beam_size` top patches follow, each starting with the probability of that patch, as a percentage rounded to two decimals, followed by `: ` and then the tab-separated patch tokens." }, { "alpha_fraction": 0.6712046265602112, "alphanum_fraction": 0.6988449096679688, "avg_line_length": 42.28571319580078, "blob_id": "756118eb3f4ee06c95687464eb28b0b4cf3e2889", "content_id": "d4de7ebdb5c91478ea22aa9e9b661e8fe82dbb86", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2424, "license_type": "permissive", "max_line_length": 170, "num_lines": 56, "path": "/Model/metrics.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow as tf\n\nlog_2_e = 1.44269504089 # Constant to convert to binary entropies\n\nclass MetricsTracker():\n\tdef __init__(self, top_10=False):\n\t\tself.total_tokens = 0\n\t\tself.total_samples = 0\n\t\tself.flush()\n\t\n\tdef flush(self, flush_totals=False):\n\t\tif flush_totals:\n\t\t\tself.total_tokens = 0\n\t\t\tself.total_samples = 0\n\t\tself.loss = 0.0\n\t\tself.acc = 0.0\n\t\tself.pointer_acc = 0.0\n\t\tself.pointer_acc_count = 0\n\t\tself.acc_count = 0\n\t\tself.total_acc = 0.0\n\t\tself.total_acc_count = 0\n\t\n\tdef add_observation(self, num_tokens, targets, predictions, pointer_locs=None, pointer_preds=None):\n\t\tpadding = tf.cast(tf.clip_by_value(targets, 0, 1), \"float32\")\n\t\ttarget_tensor = tf.cast(tf.one_hot(targets, predictions.shape[-1]), \"float32\")\n\t\t\n\t\t# Compute overall statistics, gathering types and predictions accordingly\n\t\tself.loss += log_2_e * tf.reduce_sum(padding*tf.nn.softmax_cross_entropy_with_logits(labels=target_tensor, logits=predictions))\n\t\tself.acc += tf.reduce_sum(padding*tf.metrics.sparse_categorical_accuracy(tf.constant(targets), predictions))\n\t\tself.acc_count += tf.reduce_sum(padding).numpy()\n\t\twhole_seq_acc = tf.reduce_prod((1-padding) + padding*tf.metrics.sparse_categorical_accuracy(tf.constant(targets), predictions), -1)\n\t\t\n\t\tif pointer_locs is not None:\n\t\t\tpointers_acc = tf.metrics.sparse_categorical_accuracy(pointer_locs, pointer_preds)\n\t\t\tpointers_acc = tf.reduce_prod(pointers_acc, -1)\n\t\t\tself.pointer_acc += tf.reduce_sum(pointers_acc)\n\t\t\tself.pointer_acc_count += targets.shape[0]\n\t\t\twhole_seq_acc *= pointers_acc\n\t\t\n\t\tself.total_acc += tf.reduce_sum(whole_seq_acc)\n\t\tself.total_acc_count += targets.shape[0]\n\t\t\n\t\tself.total_tokens += num_tokens\n\t\tself.total_samples += len(targets)\n\t\n\tdef get_stats(self):\n\t\tavg_loss = self.loss.numpy()/self.acc_count if self.acc_count > 0 else 0.0\n\t\tavg_acc = self.acc.numpy()/self.acc_count if self.acc_count > 0 else 0.0\n\t\tavg_total_acc = self.total_acc.numpy()/self.total_acc_count if self.total_acc_count > 0 else 0.0\n\t\t\n\t\tif self.pointer_acc_count > 0:\n\t\t\tavg_pointer_acc = self.pointer_acc.numpy() / self.pointer_acc_count\n\t\t\treturn self.total_samples, self.total_tokens, \"{0:.3f}\".format(avg_loss), \"{0:.2%}\".format(avg_acc), \"{0:.2%}\".format(avg_pointer_acc), \"{0:.2%}\".format(avg_total_acc)\n\t\telse:\n\t\t\treturn self.total_samples, self.total_tokens, \"{0:.3f}\".format(avg_loss), \"{0:.2%}\".format(avg_acc), \"{0:.2%}\".format(avg_total_acc)\n" }, { "alpha_fraction": 0.6865203976631165, "alphanum_fraction": 0.7304075360298157, "avg_line_length": 34.44444274902344, "blob_id": "5ea8f8a30b26d1acb2260a0294fbbe9b577e4182", "content_id": 
"4140154894bf94445b0bf11adee629f97f611dd2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 319, "license_type": "permissive", "max_line_length": 138, "num_lines": 9, "path": "/Analysis/README.md", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "### The code in this repository is for the replication of section 4.1 and 4.2 results.\n\n### Usage\n1. Download the data.\n2. Open ```code/real-fix-analysis.py``` and check line 284-307. The comment indicates how to reproduce the paper results for each section.\n3. Run the script\n```\npython3 code/real-fix-analysis.py\n```\n" }, { "alpha_fraction": 0.6347548365592957, "alphanum_fraction": 0.6447635889053345, "avg_line_length": 36.26486587524414, "blob_id": "2b69fa5de6c28274a87560ab5a4ca77aba6b1024", "content_id": "03edb0fc46188fdd71603ca6e178e9f45cf5fee9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6896, "license_type": "permissive", "max_line_length": 132, "num_lines": 185, "path": "/Model/data_reader.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import os\nimport random\nrandom.seed(41)\n\nimport tensorflow as tf\n\nfrom vocabulary import VocabularyBuilder\n\nclass DataReader(object):\n\t\n\tdef __init__(self, data_config, data_file, vocab_path):\n\t\tself.config = data_config\n\t\tself.add_context = data_config[\"add_context\"]\n\t\tself.train_data, self.valid_data, self.test_data = self.load_processed_data(data_file)\n\t\t\n\t\tself.vocabulary = VocabularyBuilder(vocab_path=vocab_path)\n\t\tprint(\"Finished loading data, sizes:\")\n\t\tprint(\"Vocabulary: %d\" % self.vocabulary.vocab_dim)\n\t\tprint(\"Lines: %d\" % len(self.train_data))\n\t\tprint(\"Words: %d\" % sum([len([w for w in l1 if w != '\\n']) + len([w for w in l2 if w != '\\n']) for l1, _, l2 in self.train_data]))\n\t\n\tdef load_processed_data(self, data_file):\n\t\tdata = {}\n\t\twith open(data_file, \"r\", encoding=\"utf-8\") as f:\n\t\t\tix = 0\n\t\t\tfor l in f:\n\t\t\t\tif len(l.strip()) == 0: continue\n\t\t\t\ttry:\n\t\t\t\t\tix += 1\n\t\t\t\t\tp = l.rstrip().split(\"###\")\n\t\t\t\t\tkey = p[0]\n\t\t\t\t\tinds = [int(ix) for ix in p[2].split(\"\\t\")]\n\t\t\t\t\tbug = p[1].split(\"\\t\")[inds[0]:inds[1]]\n\t\t\t\t\tpatch = p[3].split(\"\\t\")[1:-1]\n\t\t\t\t\tpre, post = (p[1].split(\"\\t\"), p[3].split(\"\\t\"))\n\t\t\t\t\tpre_locs = [int(i) for i in p[2].split(\"\\t\")]\n\t\t\t\t\tif key not in data: data[key] = []\n\t\t\t\t\tif not self.add_context:\n\t\t\t\t\t\tpre = pre[pre_locs[0]:pre_locs[1]+1]\n\t\t\t\t\t\tpre_locs = [0, pre_locs[1] - pre_locs[0]]\n\t\t\t\t\tdata[key].append((pre, pre_locs, post))\n\t\t\t\texcept Exception:\n\t\t\t\t\tpass\n\t\treturn self.split_data(data, os.path.dirname(os.path.realpath(data_file)))\n\t\n\tdef split_data(self, data, data_dir):\n\t\tall_keys = list(data.keys())\n\t\tvalid_keys = []\n\t\ttest_keys = []\n\t\twith open(os.path.join(data_dir, \"heldout_keys.txt\")) as f:\n\t\t\tfor l in f:\n\t\t\t\tif l.rstrip() not in data:\n\t\t\t\t\tcontinue\n\t\t\t\tvalid_keys.append(l.rstrip())\n\t\twith open(os.path.join(data_dir, \"test_keys.txt\")) as f:\n\t\t\tfor l in f:\n\t\t\t\tif l.rstrip() not in data:\n\t\t\t\t\tcontinue\n\t\t\t\ttest_keys.append(l.rstrip())\n\t\ttrain_keys = [k for k in all_keys if k not in valid_keys and k not in test_keys]\n\t\treturn [e for key in train_keys for e in data[key]], \\\n\t\t\t\t[e for key in 
valid_keys for e in data[key]], \\\n\t\t\t\t[e for key in test_keys for e in data[key]]\n\t\n\tdef batcher(self, mode=\"train\", optimize_packing=True):\n\t\tif self.config[\"edits\"]:\n\t\t\tot = (tf.int32, tf.int32, tf.int32, tf.int32)\n\t\telse:\n\t\t\tot = (tf.int32, tf.int32, tf.int32)\n\t\tds = tf.data.Dataset.from_generator(self.batch_generator, output_types=ot, args=(mode, optimize_packing))\n\t\tds = ds.prefetch(1)\n\t\treturn ds\n\n\tdef batch_generator(self, mode=\"train\", optimize_packing=True):\n\t\tif isinstance(mode, bytes): mode = mode.decode('utf-8')\n\t\tif mode == \"train\":\n\t\t\tbatch_data = self.train_data\n\t\t\trandom.shuffle(batch_data)\n\t\telif mode == \"test\":\n\t\t\tbatch_data = self.test_data\n\t\telse:\n\t\t\tbatch_data = self.valid_data\n\n\t\tdef sample_len(sample):\n\t\t\treturn len(sample[0]) + len(sample[2])\n\t\t\n\t\tdef find_simple_diff(pre_tokens, post_tokens):\n\t\t\tadds = []\n\t\t\tprefix = 0\n\t\t\twhile pre_tokens[:prefix + 1] == post_tokens[:prefix + 1]:\n\t\t\t\tprefix += 1\n\t\t\t# Make sure the prefix pointer doesn't point past the end of the line if only tokens were added\n\t\t\tif prefix == len(pre_tokens):\n\t\t\t\tprefix -= 1\n\t\t\tsuffix = 0\n\t\t\twhile pre_tokens[-suffix - 1:] == post_tokens[-suffix - 1:]:\n\t\t\t\tsuffix += 1\n\t\t\t\n\t\t\t# This can happen if e.g. the inserted/deleted repeat the last prefix token(s);\n\t\t\t# in that case, arbitraly assume that the prefix is unchanged and the suffix should be inserted/deleted.\n\t\t\tif len(pre_tokens) - suffix < prefix:\n\t\t\t\tsuffix = len(pre_tokens) - prefix\n\t\t\tif len(post_tokens) - suffix < prefix:\n\t\t\t\tsuffix = len(post_tokens) - prefix\n\t\t\t\n\t\t\tpre_diff = pre_tokens[prefix:len(pre_tokens) - suffix]\n\t\t\tpost_diff = post_tokens[prefix:len(post_tokens) - suffix]\n\t\t\tif post_diff:\n\t\t\t\tadds = post_diff\n\t\t\tif pre_diff:\n\t\t\t\tdel_end = prefix + len(pre_diff) - 1\n\t\t\telse:\n\t\t\t\tdel_end = 0\n\t\t\tadds.insert(0, self.vocabulary.w2i['<s>'])\n\t\t\tadds.append(self.vocabulary.w2i['</s>'])\n\t\t\treturn prefix, del_end, adds\n\t\t\n\t\tdef make_batch(buffer):\n\t\t\tif optimize_packing:\n\t\t\t\tpivot = random.choice(buffer)\n\t\t\t\tbuffer = sorted(buffer, key=lambda b: abs(sample_len(b) - sample_len(pivot)))\n\t\t\tmax_seq_len = 0\n\t\t\tindices_pre = []\n\t\t\tindices_post = []\n\t\t\tpre_locs = []\n\t\t\tif self.config[\"edits\"]:\n\t\t\t\tpointer_locs = []\n\t\t\tfor pre, (pre_start, pre_end), post in buffer:\n\t\t\t\t# Sub-tokenize input and update start/end pointers\n\t\t\t\ttokenized = [self.vocabulary.tokenize(w) for w in pre]\n\t\t\t\tnew_start = sum(len(w) for w in tokenized[:pre_start])\n\t\t\t\tnew_end = new_start + sum(len(w) for w in tokenized[pre_start:pre_end+1])\n\t\t\t\tif new_end - new_start > self.config['max_bug_length']:\n\t\t\t\t\tcontinue\n\t\t\t\ttokenized = [self.vocabulary.vocab_key(s) for w in tokenized for s in w]\n\t\t\t\tpost_tokens = [self.vocabulary.vocab_key(s) for w in post for s in self.vocabulary.tokenize(w)]\n\t\t\t\t\n\t\t\t\tseq_len = min(self.config['max_context_length'], len(tokenized)) + len(post_tokens)\n\t\t\t\tmax_seq_len = max(max_seq_len, seq_len)\n\t\t\t\tif len(indices_pre) > 0 and max_seq_len * (len(indices_pre) + 1) > self.config['max_batch_size']:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# Remove tokens outside the context window (if any) and re-compute offsets, as symmetrically as possible without wasting space\n\t\t\t\tif len(tokenized) > 
self.config['max_context_length']:\n\t\t\t\t\tcontext_available = self.config['max_context_length'] - (new_end - new_start)\n\t\t\t\t\tmax_right = min(context_available, len(tokenized) - new_end)\n\t\t\t\t\tleft_available = max(context_available//2, context_available - max_right)\n\t\t\t\t\tleft_bound = max(0, new_start - left_available)\n\t\t\t\t\tright_available = context_available - left_available\n\t\t\t\t\tright_bound = min(len(tokenized), new_end + right_available)\n\t\t\t\t\ttokenized = tokenized[left_bound:right_bound]\n\t\t\t\t\tnew_start -= left_bound\n\t\t\t\t\tnew_end -= left_bound\n\t\t\t\t\n\t\t\t\t# Replace target with edits, if applicable\n\t\t\t\tif self.config[\"edits\"]:\n\t\t\t\t\tdiff = find_simple_diff(tokenized[new_start:new_end], post_tokens[1:-1])\n\t\t\t\t\tpivot, del_end, post_tokens = diff\n\t\t\t\t\tpointer_locs.append([pivot + new_start, del_end + new_start])\n\t\t\t\tindices_pre.append(tokenized)\n\t\t\t\tindices_post.append(post_tokens)\n\t\t\t\tpre_locs.append([new_start, new_end])\n\t\t\t\n\t\t\t# Remove batch sequences from buffer and convert to tensors\n\t\t\tif not indices_pre: return (None, None)\n\t\t\tbuffer = buffer[len(indices_pre):]\n\t\t\tpre = tf.ragged.constant(indices_pre).to_tensor()\n\t\t\tpost = tf.ragged.constant(indices_post, dtype=\"int32\").to_tensor()\n\t\t\tif self.config[\"edits\"]:\n\t\t\t\tbatch = (pre, pre_locs, post, pointer_locs)\n\t\t\telse:\n\t\t\t\tbatch = (pre, pre_locs, post)\n\t\t\treturn buffer, batch\n\t\t\n\t\tbuffer = []\n\t\tfor l in batch_data:\n\t\t\tbuffer.append(l)\n\t\t\tif sum(sample_len(l) for l in buffer) > self.config[\"max_buffer_size\"]*self.config['max_batch_size']:\n\t\t\t\tbuffer, batch = make_batch(buffer)\n\t\t\t\tif batch is None: break\n\t\t\t\tyield batch\n\t\twhile buffer:\n\t\t\tbuffer, batch = make_batch(buffer)\n\t\t\tif not batch: break\n\t\t\tyield batch\n" }, { "alpha_fraction": 0.7481527328491211, "alphanum_fraction": 0.7697044610977173, "avg_line_length": 111, "blob_id": "ca4803c2f271a52fb63c994c78ac7cc4490f1691", "content_id": "8f91dc10174cfd33b954f162a2df4a568ff1964e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3248, "license_type": "permissive", "max_line_length": 690, "num_lines": 29, "path": "/README.md", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "# Patching as Translation: The Data and the Metaphor ([ASE'20](https://conf.researchr.org/details/ase-2020/ase-2020-papers/51/Patching-as-Translation-The-Data-and-the-Metaphor))\n\nThis repository is for the replication materials and data of ASE'20 paper: \"Patching as translation: The Data and the Metaphor\". See the [paper](https://arxiv.org/abs/2008.10707) for details \n\nThe code is divided into two parts: Models and Analysis. Analysis contains the code for the results in section 4.1 & 4.2. Models contains the code for section 4.3 and section 5. Please refer to the README.md in each directory for instructions.\n\n## Data\n - [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3830095.svg)](https://doi.org/10.5281/zenodo.3830095)\n\n## Reproducing Analysis part\n1. Download the data.\n2. Open ```code/real-fix-analysis.py``` and check line 284-307. The comment indicates how to reproduce the paper results for each section.\n3. 
Run the script\n```\npython3 code/real-fix-analysis.py\n```\n\n## Reproducing Model part\nAll hyper-parameter configurations can be set in `config.yml`, which currently contains the default settings used in the paper. You can set the context and/or edit enhancements there too, under `data` -> `add_context` and `edits`.\n\n### Training\nTo train our models from scratch, run `train_model.py` with the (provided) data file and vocabulary as arguments. This will run through the specified number of epochs (see `config.yml`) and validate the model's performance on held-out data every epoch, after which it writes both a log entry (in `log.txt`) and stores the latest model (under `models/`). Use the optional `-s|--suffix` flag to specify a descriptive name for the log and model locations, such as `-s edits-context`.\n\nDuring training, the model periodically (every `print_freq` minibatches, see `config.yml`) prints its metrics to track progress. Once every 5 print steps, it produces an example, which consists of the buggy line and the predicted (teacher-forced) repair tokens. When repairing with edits, the buggy line is annotated with the real pointers (`^` for start and `$` for end) and the predicted ones (`>` and `<` respectively). When validating, the model additionally beam-searches on every sample and prints/logs the corresponding top-K accuracy at the end. It may be worthwhile to reduce the config's `beam_size` just for training (from the default 25 to e.g. 5) to speed up the held-out pass.\n\n### Testing\nTo generate the top-K (beam-searched) patches for all test data, run `evaluate_model.py`, again with the same parameters. This runner creates an output file, named `results.txt`, again with an optional suffix, with all the produced patches; it is particularly useful because it translates the model-produced edit (in terms of pointer and tokens) combined with the bug into the corresponding patch. For convenience, it prints the top-generated patch to the console while producing patches.\n\nThe results file is formatted as follows: each bug is preceded by a blank line, followed by the tab-separated tokens of that bug. 
Then, the `beam_size` top patches follow, each starting with the probability of that patch, as a percentage rounded to two decimals, followed by `: ` and then the tab-separated patch tokens.\n" }, { "alpha_fraction": 0.5650378465652466, "alphanum_fraction": 0.5836200714111328, "avg_line_length": 41.589576721191406, "blob_id": "537b4d6bfad0fe9f5195239871a4cc4e7ecf6b3e", "content_id": "0c2556d79c897328dc471a86c9430b63bbfefa81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13077, "license_type": "permissive", "max_line_length": 133, "num_lines": 307, "path": "/Analysis/code/real-fix-analysis.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom util import *\n\n\ndef semanticAmbiguity(csvFile):\n with open(csvFile, 'r') as csvf:\n reader = csv.DictReader(csvf, delimiter='\\t')\n with open(\"similarHeldout2Train_similar_same.csv\", 'w') as csvw:\n writer = csv.DictWriter(csvw, fieldnames=['bug', 'sim-bug', 'patch', 'sim-patch', 'b1', 'b2', 'b3', 'b4',\n 'b-BLEU-cum', 'b-BLEU-ave', 'b-jaccard' ,'p1', 'p2', 'p3', 'p4', 'p-BLEU-cum',\n 'p-jaccard', 'p-BLEU-ave'], delimiter='\\t')\n writer.writeheader()\n for row in reader:\n if float(row['b-BLEU-ave']) >= 1.0:\n writer.writerow(row)\n semanticSpace(\"similarHeldout2Train_similar_same.csv\")\n\n\ndef semanticSpace(csvFile):\n pBleuCum = list()\n pBleuAve = list()\n with open(csvFile, 'r') as csvf:\n reader = csv.DictReader(csvf, delimiter='\\t')\n for row in reader:\n pBleuCum.append(float(row[\"p-BLEU-cum\"]))\n pBleuAve.append(float(row[\"p-BLEU-ave\"]))\n #hist(pBleuCum, [0.0, 0.2, 0.4, 0.6, 0.8, 1.01], \"blue\", np.ones(len(pBleuCum)) / len(pBleuCum),\n #'cumulative BLEU similarity')\n hist(pBleuAve, [0.0, 0.5, 1.01], \"green\", np.ones(len(pBleuAve)) / len(pBleuAve),\n 'average BLEU similarity')\n #hist(pBleuCum, [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1], \"blue\", np.ones(len(pBleuCum)) / len(pBleuCum),\n #'cumulative BLEU similarity')\n #hist(pBleuAve, [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1], \"green\", np.ones(len(pBleuAve)) / len(pBleuAve),\n #'average BLEU similarity')\n\n\ndef bugPatchSim(buggyFile, patchFile):\n bleuList = list()\n editList = list()\n jaccardList = list()\n bleuBin = np.arange(0, 1.2, 0.1)\n editBin = np.arange(0.1, 22.1, 2)\n jaccardBin = np.arange(0, 1.2, 0.1)\n\n with open(buggyFile, 'r') as bf:\n buggyLines = bf.readlines()\n with open(patchFile, 'r') as pf:\n patchLines = pf.readlines()\n\n bpSimFile = open(\"bugPatchSimilarity.csv\", 'w')\n writer = csv.DictWriter(bpSimFile, fieldnames=['bug', 'patch', 'bleu', 'edit_distance', 'jaccard'], delimiter=\"\\t\")\n writer.writeheader()\n\n for idx, bl in enumerate(buggyLines):\n csvDict = dict()\n csvDict[\"bug\"] = bl.strip()\n csvDict[\"patch\"] = patchLines[idx].strip()\n\n bleu = bleuScore(bl.strip().split(), patchLines[idx].strip().split())\n # csvDict[\"bleu\"] = str(bleu[4])\n aveBLEU = (sum(bleu[:4]) / 4)\n csvDict[\"bleu\"] = str(aveBLEU)\n bleuList.append(aveBLEU)\n\n edit = calculate_edit_distance(bl, patchLines[idx])\n editList.append(edit)\n csvDict[\"edit_distance\"] = str(edit)\n\n jaccard = get_jaccard_sim(bl.strip(), patchLines[idx].strip())\n jaccardList.append(jaccard)\n csvDict[\"jaccard\"] = str(jaccard)\n\n # writerow() has bad behaviors regarding quote marks\n bpSimFile.write('\\t'.join(\n [csvDict[\"bug\"], csvDict[\"patch\"], csvDict[\"bleu\"], csvDict[\"edit_distance\"], 
csvDict[\"jaccard\"]]) + '\\n')\n print(\"bleu: \" + str(sum(bleuList) / len(bleuList)) + \" edit_distance: \" + str(\n sum(editList) / len(editList)) + \" jaccard: \" + str(sum(jaccardList) / len(jaccardList)))\n perc = sum(i >= 0.5 for i in bleuList) / len(bleuList)\n\n print(\"BLEU > 0.5: \", perc)\n hist(bleuList, bleuBin, \"green\", np.ones(len(bleuList)) / len(bleuList), 'BLEU similarity')\n perc = sum(i <= 1 for i in editList) / len(editList)\n print(\"Edit distance <= 2: \", perc)\n perc = sum(i <= 2 for i in editList) / len(editList)\n print(\"Edit distance <= 3: \", perc)\n hist(editList, editBin, \"skyblue\", np.ones(len(editList)) / len(editList), 'edit distance similarity',\n np.arange(0, 22, 2))\n perc = sum(i >= 0.6 for i in jaccardList) / len(jaccardList)\n print(\"Jaccard distance >= 0.6: \", perc)\n perc = sum(i >= 0.8 for i in jaccardList) / len(jaccardList)\n print(\"Jaccard distance >= 0.8: \", perc)\n hist(jaccardList, jaccardBin, 'gray', np.ones(len(jaccardList)) / len(jaccardList), 'jaccard similarity')\n\n\ndef calcNSim(buggyFile, buggySimFile=None):\n # This function is to analyze whether similar bugs will generate similar fixes.\n nMinList = list()\n if buggySimFile is not None:\n # Calculate similarity between files\n with open(buggyFile, 'r') as bf:\n buggyLines = bf.readlines()\n with open(buggySimFile, 'r') as bsf:\n buggySimLines = bsf.readlines()\n # for ix, src in enumerate(buggyLines[0:5000]):\n for ix, src in enumerate(buggyLines):\n print(\"Calculating No.\" + str(ix) + \" lines......\")\n simList = list()\n for idx, tgt in enumerate(buggySimLines):\n # simScore = calculate_edit_distance(src, tgt)\n simScore = -jaccard_ngram(src, tgt)\n simList.append((simScore, idx))\n nMinElem = Nminelements(simList, 3, skip=False)\n nMinList.append(nMinElem)\n else:\n # Calculate similarity only in single file\n with open(buggyFile, 'r') as bf:\n buggyLines = bf.readlines()\n for ix, src in enumerate(buggyLines[0:10000]):\n print(\"Calculating No.\" + str(ix) + \" lines......\")\n simList = list()\n for idx, tgt in enumerate(buggyLines):\n # simScore = calculate_edit_distance(src, tgt)\n simScore = -jaccard_ngram(src, tgt)\n simList.append((simScore, idx))\n nMinElem = Nminelements(simList, 4)\n nMinList.append(nMinElem)\n return nMinList\n\n\ndef simBug2simPatch():\n buggyHeldOutFile = \"Data/Analysis/valid-test-buggy.txt\"\n patchHeldOutFile = \"Data/Analysis/valid-test-fixed.txt\"\n buggyTrainFile = \"Data/Analysis/train-buggy.txt\"\n patchTrainFile = \"Data/Analysis/train-fixed-filtered.txt\"\n\n # de-duplicate the test data\n with open(\"duplicate_indices.txt\", 'r') as di:\n dupStr = di.readlines()\n\n dupIdx = [int(d.strip()) for d in dupStr]\n\n bleuBuggy = list()\n bleuPatch = list()\n for i in range(1, 6):\n bb = list()\n bp = list()\n bleuBuggy.append(bb)\n bleuPatch.append(bp)\n\n nMinList = calcNSim(buggyHeldOutFile, buggyTrainFile)\n simBPFile = open(\"similarHeldout2Train_full.csv\", 'w')\n writer = csv.DictWriter(simBPFile,\n fieldnames=['bug', 'sim-bug', 'patch', 'sim-patch', 'b1', 'b2', 'b3', 'b4', 'b-BLEU-cum',\n 'b-BLEU-ave', 'b-jaccard' , 'p1', 'p2', 'p3', 'p4', 'p-BLEU-cum', 'p-BLEU-ave', 'p-jaccard'],\n delimiter=\"\\t\")\n writer.writeheader()\n\n bh = open(buggyHeldOutFile, 'r')\n bLines = bh.readlines()\n bt = open(buggyTrainFile, 'r')\n btLines = bt.readlines()\n\n ph = open(patchHeldOutFile, 'r')\n pLines = ph.readlines()\n pt = open(patchTrainFile, 'r')\n ptLines = pt.readlines()\n\n for idx, l in enumerate(nMinList):\n if idx in 
dupIdx:\n continue\n\n buggyLine = bLines[idx]\n patchLine = pLines[idx]\n for pair in l:\n # pair: (score, lineNo)\n csvDict = dict()\n csvDict['bug'] = buggyLine.strip().replace('\\t', ' ')\n csvDict['patch'] = patchLine.strip().replace('\\t', ' ')\n csvDict['sim-bug'] = btLines[pair[1]].strip().replace('\\t', ' ')\n csvDict['sim-patch'] = ptLines[pair[1]].strip().replace('\\t', ' ')\n\n bScoreBuggy = bleuScore(btLines[pair[1]].strip().split(), buggyLine.strip().split())\n csvDict['b1'], csvDict['b2'], csvDict['b3'], csvDict['b4'], csvDict['b-BLEU-cum'] = \\\n bScoreBuggy[0], bScoreBuggy[1], bScoreBuggy[2], bScoreBuggy[3], bScoreBuggy[4]\n csvDict['b-BLEU-ave'] = sum(bScoreBuggy[:4]) / 4\n csvDict['b-jaccard'] = jaccard_ngram(csvDict['bug'], csvDict['sim-bug'])\n\n bScorePatch = bleuScore(ptLines[pair[1]].strip().split(), patchLine.strip().split())\n csvDict['p1'], csvDict['p2'], csvDict['p3'], csvDict['p4'], csvDict['p-BLEU-cum'] = \\\n bScorePatch[0], bScorePatch[1], bScorePatch[2], bScorePatch[3], bScorePatch[4]\n csvDict['p-BLEU-ave'] = sum(bScorePatch[:4]) / 4\n csvDict['p-jaccard'] = jaccard_ngram(csvDict['patch'], csvDict['sim-patch'])\n # writerow() has bad behaviors regarding quote marks\n simBPFile.write('\\t'.join(\n [csvDict['bug'], csvDict['sim-bug'], csvDict['patch'], csvDict['sim-patch'], str(csvDict['b1']),\n str(csvDict['b2']), str(csvDict['b3']), str(csvDict['b4']), str(csvDict['b-BLEU-cum']),\n str(csvDict['b-BLEU-ave']), str(csvDict['b-jaccard']),\n str(csvDict['p1']), str(csvDict['p2']), str(csvDict['p3']), str(csvDict['p4']),\n str(csvDict['p-BLEU-cum']), str(csvDict['p-BLEU-ave']), str(csvDict['p-jaccard'])]) + '\\n')\n for ix, b in enumerate(bScoreBuggy):\n bleuBuggy[ix].append(b)\n for ix, p in enumerate(bScorePatch):\n bleuPatch[ix].append(p)\n\n print(len(bleuBuggy), len(bleuPatch))\n\n simBPFile.close()\n\n\ndef newVocab(buggyFile, patchFile):\n with open(buggyFile, 'r', encoding='ISO-8859-1') as bf:\n buggyLines = bf.readlines()\n with open(patchFile, 'r', encoding='ISO-8859-1') as pf:\n patchLines = pf.readlines()\n\n newVocab = 0\n\n for idx, bl in enumerate(buggyLines):\n b = set(bl.strip().split('\\t'))\n p = set(patchLines[idx].strip().split('\\t'))\n \n if not p.issubset(b):\n diff = p.difference(b)\n bl = bl.strip().replace('\\t', ' ')\n pl = patchLines[idx].strip().replace('\\t', ' ')\n newVocab += 1 \n\n print (newVocab / len(buggyLines))\n\n\ndef processContextLine(cl, size):\n cl = cl.strip().split(\"###\")\n cRange, fileText = cl[0], cl[1]\n cRange = [int(cRange.split(',')[0].strip('[')), int(cRange.split(',')[1].strip(']'))]\n fileText = fileText.split('\\t')\n if size != 'all':\n context = fileText[(cRange[0] - size):(cRange[1] + size +1)]\n else:\n context = fileText[:]\n return context\n\n\ndef newVocabContext(buggyFile, patchFile, N):\n with open(buggyFile, 'r', encoding='ISO-8859-1') as bf:\n buggyLines = bf.readlines()\n with open(patchFile, 'r', encoding='ISO-8859-1') as pf:\n patchLines = pf.readlines()\n\n newVocab = 0\n for idx, bl in enumerate(buggyLines):\n b = set(processContextLine(bl, N))\n p = set(patchLines[idx].strip().split('\\t'))\n if not p.issubset(b):\n newVocab += 1\n diff = p.difference(b)\n\n print (newVocab / len(buggyLines))\n\n\ndef syntaxSim(buggyFile, patchFile):\n with open(buggyFile, 'r', encoding='ISO-8859-1') as bf:\n buggyLines = bf.readlines()\n with open(patchFile, 'r', encoding='ISO-8859-1') as pf:\n patchLines = pf.readlines()\n\n sameSyntax = 0\n cnt = 0\n\n for idx, bl in enumerate(buggyLines):\n try:\n 
bs = toJavaSourceCode(bl.strip())\n buggyTokenType = lexicalAnalysis(bs)\n ps = toJavaSourceCode(patchLines[idx].strip())\n patchTokenType = lexicalAnalysis(ps)\n except:\n continue\n cnt += 1\n if buggyTokenType != '' and patchTokenType != '' and buggyTokenType == patchTokenType:\n sameSyntax += 1\n\n print (sameSyntax / cnt)\n\n\n# section 4.1.1 experiment\n# newVocab('Data/Analysis/train-buggy.txt', 'Data/Analysis/train-fixed-filtered.txt')\n# newVocab('Data/Analysis/train-bpe-buggy.txt', 'Data/Analysis/train-bpe-fixed-filtered.txt')\n# newVocabContext('Data/Analysis/train-context.txt', 'Data/Analysis/train-fixed-filtered.txt', 'all')\n# newVocabContext('Data/Analysis/train-context-bpe.txt', 'Data/Analysis/train-bpe-fixed-filtered.txt', 'all')\n\n# section 4.1.2 experiment: (bug, sim-bug) vs (patch, sim-patch) analysis\n# simBug2simPatch()\n# You need to delete the samples cannot be processed to continue, or directly draw heatmap by given results\n# draw heatmap\n# heatMapFromCSV(\"similarHeldout2Train_full.csv\", \"b-BLEU-ave\", \"p-BLEU-ave\", \"BLEU\")\n# heatMapFromCSV(\"similarHeldout2Train_full.csv\", \"b-jaccard\", \"p-jaccard\", \"Jaccard\")\n\n# section 4.1.2 experiment: Table 2; Please change the threshold for the result of each row.\n# semanticAmbiguity(\"similarHeldout2Train_full.csv\")\n\n# section 4.2.1 experiment\n# Similarity analysis between patches and bugs\n# bugPatchSim(\"Data/Analysis/train-buggy.txt\", \"Data/Analysis/train-fixed-filtered.txt\")\n\n# section 4.2.2 experiment\n# syntaxSim('Data/Analysis/train-buggy.txt', 'Data/Analysis/train-fixed-filtered.txt')\n# syntaxSim('train-buggy-unseen.txt', 'train-fixed-filtered-unseen.txt')\n# syntaxSim('train-buggy-seen.txt', 'train-fixed-filtered-seen.txt')\n\n\n" }, { "alpha_fraction": 0.6181727647781372, "alphanum_fraction": 0.6295928359031677, "avg_line_length": 33.11864471435547, "blob_id": "028710a7a09b8d92b9f0be7475f75c4408d86b35", "content_id": "796b84329750b453bec60c2960210c93441c86f1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2014, "license_type": "permissive", "max_line_length": 108, "num_lines": 59, "path": "/Model/vocabulary.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "\nRESERVED_TOKENS = [\"<s>\", \"</s>\", \"<pad>\"]\n\t\nclass VocabularyBuilder():\n\t\n\tdef __init__(self, token_generator=None, vocab_path=None):\n\t\tself.load_vocab(vocab_path)\n\t\tself.vocab_dim = len(self.w2i)\n\t\tself.vocab_key = lambda w: self.w2i[w] if w in self.w2i else self.w2i[\"<unk>\"] # Convenience function\n\t\t\n\t\tself.bpe_cache = {}\n\t\tself.bpe_lookup_dict = {}\n\t\tfor token in self.w2i.keys():\n\t\t\tif token[:2] not in self.bpe_lookup_dict:\n\t\t\t\tself.bpe_lookup_dict[token[:2]] = set([token])\n\t\t\telse:\n\t\t\t\tself.bpe_lookup_dict[token[:2]].add(token)\n\t\n\tdef load_vocab(self, vocab_path):\n\t\twith open(vocab_path, \"r\", encoding=\"utf8\") as f:\t\n\t\t\tvocab = [l.rstrip('\\n').split(\"\\t\")[1] for l in f.readlines()]\n\t\tself.w2i = {w:i for i, w in enumerate(vocab)}\n\t\tself.i2w = {i:w for w, i in self.w2i.items()}\n\t\n\tdef tokenize(self, label):\n\t\tif label in RESERVED_TOKENS: return [label]\n\t\tlabel = \"\".join([c for c in label if ord(c) >= 32 and ord(c) < 127]) + \"#\"\n\t\ttokens = []\n\t\tix = 0\n\t\tif label in self.bpe_cache and self.bpe_cache[label] is not None:\n\t\t\treturn self.bpe_cache[label]\n\t\twhile ix < len(label):\n\t\t\tif ix == len(label) - 
2:\n\t\t\t\ttokens.append(label[ix:])\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcandidates = self.bpe_lookup_dict.get(label[ix:ix+2], [])\n\t\t\t\tif not candidates: top_candidate = label[ix]\n\t\t\t\telse:\n\t\t\t\t\t# Only sub-tokens that match the next characters and don't leave the end-of-word marker left by itself\n\t\t\t\t\tcandidates = [t for t in candidates if t == label[ix:ix+len(t)] and not len(label) == ix + len(t) + 1] \n\t\t\t\t\tif not candidates: top_candidate = label[ix]\n\t\t\t\t\telse: top_candidate = max(candidates, key=lambda e: len(e))\n\t\t\t\ttokens.append(top_candidate)\n\t\t\t\tix += len(top_candidate)\n\t\tself.bpe_cache[label] = tokens\n\t\treturn tokens\n\t\n\tdef undo_bpe(self, tokens):\n\t\tcleaned = []\n\t\tcurr = \"\"\n\t\tfor t in tokens:\n\t\t\tif t.endswith(\"#\"):\n\t\t\t\tcleaned.append(curr+t[:-1])\n\t\t\t\tcurr = \"\"\n\t\t\telif curr == \"\" and t in RESERVED_TOKENS:\n\t\t\t\tcleaned.append(t)\n\t\t\telse: curr += t\n\t\tif curr: cleaned.append(curr)\n\t\treturn cleaned\n" }, { "alpha_fraction": 0.704049825668335, "alphanum_fraction": 0.7102803587913513, "avg_line_length": 32.24137878417969, "blob_id": "85b4eb5bd6b34cfac29a95c7c5489cf6f88b274e", "content_id": "92e60fd5459be00e0a968157089eb68d76a62e82", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 963, "license_type": "permissive", "max_line_length": 167, "num_lines": 29, "path": "/Analysis/code/filter_consecutive_identical.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "def intersection(lst1, lst2): \n lst3 = [value for value in lst1 if value in lst2] \n return lst3\n\ndef findConsecutiveSame(heldoutB, trainB, heldoutP=None, trainP=None):\n\twith open(heldoutB, 'r') as hb:\n\t\theldoutBList = hb.readlines()\n\twith open(heldoutP, 'r') as hp:\n\t\theldoutPList = hp.readlines()\n\twith open(trainB, 'r') as tb:\n\t\ttrainBList = tb.readlines()\n\twith open(trainP, 'r') as tp:\n\t\ttrainPList = tp.readlines()\n\tlineNoB = list()\n\tlineNoP = list()\n\tfor idx, hLine in enumerate(heldoutBList):\n\t\tif hLine in trainBList:\n\t\t\tlineNoB.append(idx)\n\tfor idx, hLine in enumerate(heldoutPList):\n\t\tif hLine in trainPList:\n\t\t\tlineNoP.append(idx)\n\n\tintersec = intersection(lineNoB, lineNoP)\n\n\twith open (\"duplicate_indices_test.txt\", 'w') as di:\n\t\tfor l in intersec:\n\t\t\tdi.write(str(l) + \"\\n\")\n\nfindConsecutiveSame(\"Data/Analysis/test-buggy.txt\", \"Data/Analysis/train-buggy.txt\", \"Data/Analysis/test-fixed-filtered.txt\", \"Data/Analysis/train-fixed-filtered.txt\")" }, { "alpha_fraction": 0.6433120965957642, "alphanum_fraction": 0.6576433181762695, "avg_line_length": 37.30487823486328, "blob_id": "b33dba8710d334441bcf0dd2f28d6b5889eb005c", "content_id": "e24a2f54388463635c3d24a82ed9c520c2da2e4a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3140, "license_type": "permissive", "max_line_length": 127, "num_lines": 82, "path": "/Model/evaluate_model.py", "repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import math\nimport yaml\nimport argparse\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom data_reader import DataReader\nfrom metrics import MetricsTracker\nfrom tracker import Tracker\nfrom transformer_patching_model import TransformerPatchingModel\n\nconfig = yaml.safe_load(open(\"config.yml\"))\n\ndef main():\n\t# Extract arguments\n\tap = 
argparse.ArgumentParser()\n\tap.add_argument(\"data\", help=\"Data file containing bugs\")\n\tap.add_argument(\"vocabulary\", help=\"Vocabulary file\")\n\tap.add_argument(\"-s\", \"--suffix\", help=\"Model and log-file suffix\")\n\targs = ap.parse_args()\n\t\n\tdata = DataReader(config[\"data\"], data_file=args.data, vocab_path=args.vocabulary)\n\tmodel = TransformerPatchingModel(config[\"transformer\"], data.vocabulary.vocab_dim, is_pointer=config[\"data\"][\"edits\"])\n\t\n\t# Restore model after a simple init\n\ttracker = Tracker(model, suffix=args.suffix)\n\tmodel(tf.zeros((1, 2), 'int32'), tf.zeros((1, 2), 'int32'), tf.zeros((1, 2), 'int32'), tf.zeros((0, 0), 'int32'), True)\n\ttracker.restore(best_only=True)\n\t\n\twith open(\"results\" + (\"\" if args.suffix is None else \"-\" + args.suffix) + \".txt\", \"w\") as f_out:\n\t\tfor batch in data.batcher(mode=\"test\", optimize_packing=False):\n\t\t\tpre, pre_locs = batch[:2]\n\t\t\tpreds = model.predict(data.vocabulary, pre, pre_locs, config[\"data\"][\"beam_size\"], config[\"data\"][\"max_bug_length\"])\n\t\t\twrite_completions(f_out, data.vocabulary, pre.numpy(), pre_locs.numpy(), preds)\n\ndef write_completions(f_out, vocabulary, bugs, bug_locs, completions, pointer_locs=None):\n\tfor bug_ix, comp_list in enumerate(completions):\n\t\tf_out.write('\\n')\n\t\tif not comp_list:\n\t\t\tf_out.write('\\n')\n\t\t\tcontinue\n\t\tbug_subtokens = [vocabulary.i2w[ix] for ix in bugs[bug_ix]]\n\t\tif '<pad>' in bug_subtokens:\n\t\t\tbug_subtokens = bug_subtokens[:bug_subtokens.index('<pad>')]\n\t\tif config[\"data\"][\"add_context\"]:\n\t\t\tbug_subtokens = bug_subtokens[bug_locs[bug_ix][0]:bug_locs[bug_ix][1]]\n\t\tbug = vocabulary.undo_bpe(bug_subtokens)\n\t\tbug = \"\\t\".join(bug)\n\t\tf_out.write(bug)\n\t\tf_out.write('\\n')\n\t\tfix_probs = [math.exp(-p) for p, _, _ in comp_list]\n\t\tfix_probs_total = sum(fix_probs)\n\t\tfix_probs = [p/fix_probs_total for p in fix_probs]\n\t\tfor ix, (_, pointer_locs, bug_fix) in enumerate(comp_list):\n\t\t\tfix_prob = fix_probs[ix]\n\t\t\tfix_ent = -math.log2(fix_prob + 1e-9)\n\t\t\tfix_subtokens = [vocabulary.i2w[b] for b in bug_fix]\n\t\t\tif '</s>' in fix_subtokens:\n\t\t\t\tfix_subtokens = fix_subtokens[:fix_subtokens.index('</s>')]\n\t\t\tif pointer_locs:\n\t\t\t\tpointer_locs[0] -= bug_locs[bug_ix][0]\n\t\t\t\tpointer_locs[1] -= bug_locs[bug_ix][0]\n\t\t\t\tbug_fix = bug_subtokens[:pointer_locs[0]]\n\t\t\t\tbug_fix += fix_subtokens\n\t\t\t\tif pointer_locs[1] > 0 or pointer_locs[0] == 0:\n\t\t\t\t\tbug_fix += bug_subtokens[pointer_locs[1] + 1:]\n\t\t\t\telse:\n\t\t\t\t\tbug_fix += bug_subtokens[pointer_locs[0]:]\n\t\t\telse:\n\t\t\t\tbug_fix = fix_subtokens\n\t\t\tbug_fix = \"\\t\".join(vocabulary.undo_bpe(bug_fix))\n\t\t\tf_out.write('{0:.2%}'.format(fix_prob))\n\t\t\tf_out.write(': ')\n\t\t\tf_out.write(bug_fix)\n\t\t\tf_out.write('\\n')\n\t\t\tif ix == 0:\n\t\t\t\tprint(\"{0:.3f}/{1:.2%}, {2} --> {3}\".format(fix_ent, fix_prob, \" \".join(bug.split(\"\\t\")), \" \".join(bug_fix.split(\"\\t\"))))\n\n\nif __name__ == '__main__':\n\tmain()" }, { "alpha_fraction": 0.6426343321800232, "alphanum_fraction": 0.6607485413551331, "avg_line_length": 45.31111145019531, "blob_id": "0b7db7676124494759ac58762d5a6ae732d235ee", "content_id": "8b4f0a78caf9f6adb9be596e94f524bff68ff8a7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8336, "license_type": "permissive", "max_line_length": 178, "num_lines": 180, "path": "/Model/train_model.py", 
"repo_name": "ARiSE-Lab/Patch-as-translation", "src_encoding": "UTF-8", "text": "import math\nimport yaml\nimport argparse\n\nimport tensorflow as tf\n\nfrom data_reader import DataReader\nfrom metrics import MetricsTracker\nfrom tracker import Tracker\nfrom transformer_patching_model import TransformerPatchingModel\n\nconfig = yaml.safe_load(open(\"config.yml\"))\n\ndef main():\n\t# Extract arguments\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"data\", help=\"File with pre-processed data (optional)\")\n\tap.add_argument(\"vocabulary\", help=\"Vocabulary file (optional)\")\n\tap.add_argument(\"-s\", \"--suffix\", help=\"Model and log-file suffix\")\n\targs = ap.parse_args()\n\tprint(\"Using configuration:\", config)\n\tdata = DataReader(config[\"data\"], args.data, args.vocabulary)\n\tmodel = TransformerPatchingModel(config[\"transformer\"], data.vocabulary.vocab_dim, is_pointer=config[\"data\"][\"edits\"])\n\ttrain(model, data, suffix=args.suffix)\n\ndef train(model, data, suffix=None):\n\toptimizer = tf.optimizers.Adam(config[\"training\"][\"lr\"])\n\ttracker = Tracker(model, suffix=suffix)\n\tmodel(tf.zeros((1, 2), 'int32'), tf.zeros((1, 2), 'int32'), tf.zeros((1, 2), 'int32'), tf.zeros((0, 0), 'int32'), True)\n\ttracker.restore()\n\t\n\ttotal_batches = 0\n\tcurrent_epoch = tracker.ckpt.step.numpy()\n\tfor epoch in range(current_epoch, config[\"training\"][\"num_epochs\"]):\n\t\tprint(\"Epoch:\", epoch + 1)\n\t\tmbs = 0\n\t\twords = 0\n\t\tmetrics = MetricsTracker()\n\t\t# Batcher returns a square index array and a binary mask indicating which words are padding (0) and real (1)\n\t\tfor batch in data.batcher(mode=\"train\"):\n\t\t\tmbs += 1\n\t\t\ttotal_batches += 1\n\t\t\t\n\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\tpre, pre_locs, post, pointer_locs = batch\n\t\t\telse:\n\t\t\t\tpre, pre_locs, post = batch\n\t\t\t\tpointer_locs = tf.zeros((0, 0), 'int32')\n\t\t\tsamples = int(tf.reduce_sum(1 - tf.clip_by_value(pre, 0, 1)).numpy() + tf.reduce_sum(1 - tf.clip_by_value(post[:, 1:], 0, 1)).numpy())\n\t\t\twords += samples\n\t\t\t\n\t\t\t# Compute loss in scope of gradient-tape (can also use implicit gradients)\n\t\t\twith tf.GradientTape(watch_accessed_variables=False) as tape:\n\t\t\t\ttape.watch(model.trainable_variables)\n\t\t\t\tpreds = model(pre, pre_locs, post[:, :-1], pointer_locs, training=True)\n\t\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\t\tpreds, pointer_preds = preds\n\t\t\t\t\tloss = masked_ce_loss(post[:, 1:], preds, pointer_locs, pointer_preds)\n\t\t\t\telse:\n\t\t\t\t\tloss = masked_ce_loss(post[:, 1:], preds)\n\t\t\t\n\t\t\t# Collect gradients, clip and apply\n\t\t\tgrads = tape.gradient(loss, model.trainable_variables)\n\t\t\tgrads, _ = tf.clip_by_global_norm(grads, 0.25)\n\t\t\toptimizer.apply_gradients(zip(grads, model.trainable_variables))\n\t\t\t\n\t\t\t# Update average loss and print if applicable\n\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\tmetrics.add_observation(samples, post[:, 1:], preds, pointer_locs, pointer_preds)\n\t\t\telse:\n\t\t\t\tmetrics.add_observation(samples, post[:, 1:], preds)\n\t\t\tif mbs % config[\"training\"][\"print_freq\"] == 0:\n\t\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\t\tprint(\"MB: {0}, bugs: {1}, tokens: {2}, entropy: {3}, acc: {4}, pointer acc: {5}, full seq acc: {6}\".format(mbs, *metrics.get_stats()))\n\t\t\t\telse:\n\t\t\t\t\tprint(\"MB: {0}, bugs: {1}, tokens: {2}, entropy: {3}, acc: {4}, full seq acc: {5}\".format(mbs, *metrics.get_stats()))\n\t\t\t\tmetrics.flush()\n\t\t\t\tif mbs % 
(5*config[\"training\"][\"print_freq\"]) == 0:\n\t\t\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\t\t\tanalyze_sample(data, pre, pre_locs, post, preds, pointer_locs, pointer_preds)\n\t\t\t\t\telse:\n\t\t\t\t\t\tanalyze_sample(data, pre, pre_locs, post, preds)\n\n\t\tif metrics.total_acc_count > 0:\n\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\tprint(\"MB: {0}, bugs: {1}, tokens: {2}, entropy: {3}, acc: {4}, pointer acc: {5}, full seq acc: {6}\".format(mbs, *metrics.get_stats()))\n\t\t\telse:\n\t\t\t\tprint(\"MB: {0}, bugs: {1}, tokens: {2}, entropy: {3}, acc: {4}, full seq acc: {5}\".format(mbs, *metrics.get_stats()))\n\t\t\n\t\tstats, top_k_accs = eval(model, data)\n\t\tif config[\"data\"][\"edits\"]:\n\t\t\tbugs, tokens, entropy, accs, pointer_accs, full_accs = stats\n\t\t\tprint(\"Validation: bugs: {0}, tokens: {1}, entropy: {2}, accuracy: {3}, pointer acc: {4}, full seq accuracy: {5}\".format(bugs, tokens, entropy, accs, pointer_accs, full_accs))\n\t\t\ttracker.update(model, [full_accs, pointer_accs, *top_k_accs])\n\t\telse:\n\t\t\tbugs, tokens, entropy, accs, full_accs = stats\n\t\t\tprint(\"Validation: bugs: {0}, tokens: {1}, entropy: {2}, accuracy: {3}, full seq accuracy: {4}\".format(bugs, tokens, entropy, accs, full_accs))\n\t\t\ttracker.update(model, [full_accs, *top_k_accs])\n\ndef eval(model, data, validate=True):\n\tmbs = 0\n\tmetrics = MetricsTracker()\n\ttop_k_accs = [0.0]*config[\"data\"][\"beam_size\"]\n\tfor batch in data.batcher(mode=\"valid\" if validate else \"test\"):\n\t\tif config[\"data\"][\"edits\"]:\n\t\t\tpre, pre_locs, post, pointer_locs = batch\n\t\telse:\n\t\t\tpre, pre_locs, post = batch\n\t\t\tpointer_locs = tf.zeros((0, 0), 'int32')\n\t\tmbs += 1\n\t\tsamples = int(tf.reduce_sum(1 - tf.clip_by_value(pre, 0, 1)).numpy() + tf.reduce_sum(1 - tf.clip_by_value(post[:, 1:], 0, 1)).numpy())\n\t\tpreds = model(pre, pre_locs, post[:, :-1], pointer_locs, training=False)\n\t\tif config[\"data\"][\"edits\"]:\n\t\t\tpreds, pointer_preds = preds\n\t\tbeams = model.predict(data.vocabulary, pre, pre_locs, config[\"data\"][\"beam_size\"], config[\"data\"][\"max_bug_length\"])\n\t\ttargets = post[:, 1:].numpy().tolist()\n\t\ttargets = [[t for t in tgt if t > 0] for tgt in targets]\n\t\tfor ix, beam in enumerate(beams):\n\t\t\tfor rank, (prob, locs, pred) in enumerate(beam):\n\t\t\t\tif pred[:len(targets[ix])] == targets[ix] and (not locs or locs == pointer_locs[ix].numpy().tolist()):\n\t\t\t\t\ttop_k_accs[rank] += 1\n\t\t\t\t\tbreak\n\t\tif config[\"data\"][\"edits\"]:\n\t\t\tmetrics.add_observation(samples, post[:, 1:], preds, pointer_locs, pointer_preds)\n\t\telse:\n\t\t\tmetrics.add_observation(samples, post[:, 1:], preds)\n\t\tif mbs % ((1 if validate else 5)*config[\"training\"][\"print_freq\"]) == 0:\n\t\t\tif config[\"data\"][\"edits\"]:\n\t\t\t\tanalyze_sample(data, pre, pre_locs, post, preds, pointer_locs, pointer_preds)\n\t\t\telse:\n\t\t\t\tanalyze_sample(data, pre, pre_locs, post, preds)\n\tfor k in range(len(top_k_accs)):\n\t\ttop_k_accs[k] /= metrics.total_samples\n\tprint(\"Top K accuracies: {0}\".format(\", \".join([\"{0}: {1:.2%}\".format(ix + 1, acc) for ix, acc in enumerate(top_k_accs)])))\n\treturn metrics.get_stats(), top_k_accs\n\n# Prints a bug/repair pair with the (teacher-forced) generated fix for reference\ndef analyze_sample(data, pre, pre_locs, post, preds, pointer_locs=None, pointer_preds=None):\n\toffsets = [int(l) for l in pre_locs[0].numpy()]\n\tif pointer_locs is not None:\n\t\tpointer_locs = pointer_locs[0].numpy()\n\t\tmax_pointers = 
tf.argmax(pointer_preds[0], -1).numpy()\n\t\tmax_pointers[0] -= offsets[0]\n\t\tmax_pointers[1] -= offsets[0]\n\t\tpointer_locs[0] -= offsets[0]\n\t\tpointer_locs[1] -= offsets[0]\n\t\tbuggy_tokens = []\n\t\tfor pos, ix in enumerate(pre[0, offsets[0]:offsets[1]].numpy()):\n\t\t\tif data.vocabulary.i2w[ix] == \"<pad>\": continue\n\t\t\tif pos == max_pointers[0]: buggy_tokens.append('>')\n\t\t\tif pos == pointer_locs[0]: buggy_tokens.append('^')\n\t\t\tbuggy_tokens.append(data.vocabulary.i2w[ix])\n\t\t\tif pos == max_pointers[1]: buggy_tokens.append('<')\n\t\t\tif pos == pointer_locs[1]: buggy_tokens.append('$')\n\t\tbuggy_tokens = data.vocabulary.undo_bpe(buggy_tokens)\n\telse:\n\t\tbuggy_tokens = data.vocabulary.undo_bpe([data.vocabulary.i2w[ix] for ix in pre[0, offsets[0]:offsets[1]].numpy() if data.vocabulary.i2w[ix] != \"<pad>\"])\n\treal_fix_tokens = data.vocabulary.undo_bpe([data.vocabulary.i2w[ix] for ix in post[0, 1:].numpy() if data.vocabulary.i2w[ix] != \"<pad>\"])\n\tmodel_fix_tokens = data.vocabulary.undo_bpe([data.vocabulary.i2w[ix] for ix in tf.argmax(preds[0], -1).numpy()])[:len(real_fix_tokens)]\n\tprint(\"Sample:\", \" \".join(buggy_tokens))\n\tprint(\" --> \", \" \".join(model_fix_tokens))\n\tprint(\"Actual:\", \" \".join(real_fix_tokens))\n\n# Compute cross-entropy loss, making sure not to include \"masked\" padding tokens\ndef masked_ce_loss(post_indices, preds, pointer_locs=None, pointer_preds=None):\n\tpost_masks = tf.cast(tf.clip_by_value(post_indices, 0, 1), \"float32\")\n\tsamples = tf.reduce_sum(post_masks)\n\tloss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=post_indices, logits=preds)\n\tloss *= post_masks\n\tloss = tf.reduce_sum(loss) / samples\n\tif pointer_locs is not None:\n\t\tpointer_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=pointer_locs, logits=pointer_preds)\n\t\tpointer_loss = tf.reduce_sum(pointer_loss, -1)\n\t\tpointer_loss = tf.reduce_mean(pointer_loss)\n\t\tloss += pointer_loss\n\treturn loss\n\n\nif __name__ == '__main__':\n\tmain()\n" } ]
14
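The record above closes the Patch-as-translation repository. Its README describes the `results.txt` format precisely (blank-line-separated records, a tab-separated bug line, then `beam_size` lines of the form `XX.XX%: <patch tokens>`), so a small parser is easy to sketch. Only the file name and the line layout come from the README; the function name and record structure below are illustrative assumptions.

```python
# Minimal sketch of a parser for the results file described in the README
# above: records are separated by blank lines, the first line of a record
# holds the tab-separated bug tokens, and each following line holds
# "<prob>%: <tab-separated patch tokens>". Only the file name "results.txt"
# is taken from the README; everything else here is an assumption.
def parse_results(path="results.txt"):
    records, bug, patches = [], None, []
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:  # a blank line opens the next record
                if bug is not None:
                    records.append((bug, patches))
                bug, patches = None, []
            elif bug is None:
                bug = line.split("\t")  # first non-blank line: the bug
            else:
                prob, _, patch = line.partition(": ")
                patches.append((float(prob.rstrip("%")) / 100,
                                patch.split("\t")))
    if bug is not None:
        records.append((bug, patches))
    return records
```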
faizH3/faiz
https://github.com/faizH3/faiz
303dc7a026751a29cca36efc21c7708c659303c1
c94871a7553be6c6b9fb61e204c1cdf9174e33ff
2031e84e06d00a85e11855451e8df81cb7d42f03
refs/heads/master
2020-12-13T04:09:05.087928
2020-11-14T06:02:49
2020-11-14T06:02:49
234,309,554
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5521885752677917, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 21.846153259277344, "blob_id": "bc8695577d97e997a48d717f5c221019fc3e96e6", "content_id": "a46fd3482624479a84bdabc0bb74f5bec1fd4665", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "permissive", "max_line_length": 62, "num_lines": 13, "path": "/Pyth_speakdroid.py", "repo_name": "faizH3/faiz", "src_encoding": "UTF-8", "text": "# newbie\n#education\n#python v3 in this android\n\nimport android\n\nprint ('===============================')\nprint (' this is program speak.py')\nprint ('===============================')\n\ndroid = android.Android()\ntext = droid.dialogGetInput('TTS', 'your input text: ').result\ndroid.ttsSpeak(text)\n" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 8.5, "blob_id": "8c845c25e4433801fa7569d166a08d794410d544", "content_id": "ed58c209bb9fc526fc7d65814b3e7cb5ed71c481", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "permissive", "max_line_length": 9, "num_lines": 2, "path": "/README.md", "repo_name": "faizH3/faiz", "src_encoding": "UTF-8", "text": "# newbie\neducation\n" }, { "alpha_fraction": 0.5219573378562927, "alphanum_fraction": 0.5407779216766357, "avg_line_length": 28.518518447875977, "blob_id": "e6cf143d2e74cf4c776a9d763a2fa3d007ff47ed", "content_id": "aa09119aa629be3f564769b873f92ff17eafb075", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 797, "license_type": "permissive", "max_line_length": 58, "num_lines": 27, "path": "/Program_kalkulator.py", "repo_name": "faizH3/faiz", "src_encoding": "UTF-8", "text": "jawab=\"y\"\nwhile(jawab==\"y\"):\n print(\"=============================================\")\n print(\" PROGRAM CONTOH PENGGUNAAN WHILE\")\n print(\"=============================================\")\n print(\"\")\n print(\"MENU PROGRAM\")\n print(\"1. Menghitung luas segitiga\")\n print(\"2. Menghitung luas lingkaran\")\n A=int(input(\"pilih menu: \"))\n a=(A)\n b=(A)\n if a==1:\n a1=int(input(\"ketikan alas segitiga:\"))\n a2=int(input(\"ketikan tinggi segitiga:\"))\n Luas=(a1*a2)/2\n print(\"Luas=\",Luas)\n elif b==2:\n b1=int(input(\"ketikan jari-jari lingkaran:\"))\n Luas=22/7*(b1**2)\n print(\"Luas=\",Luas)\n jawab= input(\"apakah anda ingin mengulangi lagi? y/n -\")\n if jawab==\"n\":\n print(\"terimakasih sudah mencoba.\")\n break\n elif jawab!=\"y\":\n print(\"input yang anda masukan salah.\")\n" } ]
3
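The menu-driven calculator in the record above approximates pi as 22/7. A variant of its two area formulas using `math.pi` gives better precision; this is a sketch assuming Python 3, and the function names are illustrative, not the author's.

```python
import math

# Sketch: the two area formulas from Program_kalkulator.py above, using
# math.pi instead of the 22/7 approximation. Names are illustrative.
def luas_segitiga(alas, tinggi):    # triangle area
    return (alas * tinggi) / 2

def luas_lingkaran(jari_jari):      # circle area
    return math.pi * jari_jari ** 2

print("Luas =", luas_lingkaran(7))  # 153.938..., vs 22/7 * 49 = 154.0
```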
prakhar728/mlh
https://github.com/prakhar728/mlh
83a6b6b472c68acb6a15f1fef08979e59ce375e8
bb5e6fa8349add9c677e26e16790319181bb3063
428d5feb1a989fb8ac25e730d51f2a5fee668d83
refs/heads/master
2023-08-13T03:25:36.927611
2021-10-11T10:53:08
2021-10-11T10:53:08
381,921,221
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7972028255462646, "alphanum_fraction": 0.7972028255462646, "avg_line_length": 30.88888931274414, "blob_id": "d4d6d646cb0bfa17b4473db40c7d3e1ad9619d3a", "content_id": "ed4e0756247e9e4d57e1fa5c1d6a7d0622f58da4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 286, "license_type": "no_license", "max_line_length": 95, "num_lines": 9, "path": "/Day4/RandomNumber/Readme.md", "repo_name": "prakhar728/mlh", "src_encoding": "UTF-8", "text": "# RANDOM NUMBER GENERATOR\nThis python program generates random numbers and is independent of the inbuilt random function.\n\nAs seed it uses the time object.\n\nThe algorithm it works on is the Linear Congruential Generator.\n\n## THE FUTURE\nUpper limit and Lower limit has to be added in it." }, { "alpha_fraction": 0.6438053250312805, "alphanum_fraction": 0.6858407258987427, "avg_line_length": 25.47058868408203, "blob_id": "bdb7c98bea4d4892bf657f6d7e9794742952d212", "content_id": "1c3067b4bb46063207fb7f2b8d295639aaee07d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 57, "num_lines": 17, "path": "/Day4/RandomNumber/RandomNumberGenerator.py", "repo_name": "prakhar728/mlh", "src_encoding": "UTF-8", "text": "\nimport time\n\ndef random_number(seed,num):\n rando=seed\n for i in range(num):\n rando = (13*rando + 53)%90060\n print(rando)\n \nt = time.localtime()\ncurrent_time = time.strftime(\"%H:%M:%S\", t)\nprint(current_time)\nhours=int(current_time[:2])\nminutes=int(current_time[3:5])\nseconds=int(current_time[6:])\nseedRandom=seconds+minutes*60+hours*60*60\nnum=int(input(\"Enter number of random numbers you want\"))\nrandom_number(seedRandom,num)\n\n" } ]
2
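The RandomNumber README above lists upper and lower limits as future work. One way to bound its linear congruential generator is sketched below: the multiplier, increment, and modulus (13, 53, 90060) come from `RandomNumberGenerator.py`, while the modulo-based range mapping and the function name are illustrative assumptions, not the author's design.

```python
import time

# Sketch: the LCG from RandomNumberGenerator.py above, extended with the
# lower/upper limits its README lists as future work. Multiplier, increment
# and modulus (13, 53, 90060) come from the original file; the range
# mapping is an illustrative choice.
def bounded_random(seed, count, lower, upper):
    state = seed
    span = upper - lower + 1
    for _ in range(count):
        state = (13 * state + 53) % 90060  # advance the LCG state
        yield lower + state % span         # map into [lower, upper]

seed = int(time.time()) % 90060            # time-based seed, as in the original
print(list(bounded_random(seed, 5, 1, 6)))  # e.g. five bounded draws
```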
JorisHeirman/PySudoku
https://github.com/JorisHeirman/PySudoku
370de85f42f34fdb3adf3d2657359c7bb5af5260
93a2fb8e06a0c718a135602c3755b9b4486c0276
5ad426428b7167bdceeb02b10726261ba32a6bf1
refs/heads/master
2021-01-23T00:40:39.985630
2017-05-30T13:21:35
2017-05-30T13:21:35
92,834,083
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5858798623085022, "alphanum_fraction": 0.5932560563087463, "avg_line_length": 23.973684310913086, "blob_id": "61f45d7b58a0d5f62fc70077d2ec39a367a49383", "content_id": "9d3c8b0440eb465327af5305a92d41306bef61a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 949, "license_type": "no_license", "max_line_length": 74, "num_lines": 38, "path": "/solveSudoku.py", "repo_name": "JorisHeirman/PySudoku", "src_encoding": "UTF-8", "text": "from sudoku.Sudoku import Sudoku\nfrom pprint import pprint\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.graphics import Color, Ellipse, Line\nfrom kivy.uix.boxlayout import BoxLayout\n\nclass MySudokuWidget(Widget):\n def on_touch_down(self,touch):\n with self.canvas:\n Color(2,2,2)\n d = 40.\n Ellipse(pos=(touch.x - d / 2, touch.y - d /2), size=(d,d))\n touch.ud['line'] = Line(points =(touch.x, touch.y))\n\n def on_touch_move(self, touch):\n touch.ud['line'].points += [touch.x, touch.y]\n print(touch.ud)\n\nclass MarvelApp(BoxLayout):\n def build(self):\n\n\n\nclass SolveSudokuApp(App):\n pass\n\"\"\"\nclass SolveSudoku(App):\n def build(self):\n s = Sudoku()\n s.printCanvas()\n s.startSolve()\n s.printCanvas()\n return MySudokuWidget()\n\"\"\"\nif __name__ == \"__main__\":\n #SolveSudoku().run()\n Marvel().build()\n" }, { "alpha_fraction": 0.45990440249443054, "alphanum_fraction": 0.468932569026947, "avg_line_length": 26.691177368164062, "blob_id": "cf9b4feb209ef32cbf356f5f72b29ae5773bfe52", "content_id": "66769bfbb64c03efd9ef7093c3f5ffe817ebac42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3766, "license_type": "no_license", "max_line_length": 90, "num_lines": 136, "path": "/sudoku/Sudoku2.py", "repo_name": "JorisHeirman/PySudoku", "src_encoding": "UTF-8", "text": "import numpy\nimport json\n\n\n\nclass Sudoku:\n\nWIDTH = 9\nFULLSUDOKU = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nFILE = 'sudoku1.json'\nFILL = \"x\"\nmatrix = [[FILL]*WIDTH for i in range(9)]\nborders = [2,5,8]\n\n def __init__(self):\n self.matrix = [[FILL]*WIDTH for i in range(WIDTH)]\n with open(FILE) as data_file:\n json_object = json.load(data_file)\n for key in json_object:\n value = json_object[key]\n for v in value:\n self.setNumber(int(key), int(v), int(value[v]))\n\n def printCanvas(self):\n for i in range(WIDTH):\n print \"\"\n for j in range(WIDTH):\n print self.matrix[i][j],\n\n def setNumber(self, x, y, number):\n self.matrix[x][y] = number\n\n def isNumber(self , x , y):\n try:\n value = self.matrix[x][y]\n if value == FILL:\n return False\n else:\n return True\n except Exception as e:\n return False\n\n def isInHorizontalLine(self, l):\n for i in range(WIDTH):\n value = self.matrix[l][i]\n if value != FILL and (value in self.matrix[l+1] or value in self.matrix[l+2]):\n print value\n return True\n\n def findMissingValue(self, x, y):\n nums = []\n for i in range(WIDTH):\n xvalue = self.matrix[x][i]\n if (xvalue != FILL):\n print xvalue\n nums.append(xvalue)\n yvalue = self.matrix[i][y]\n if (yvalue != FILL):\n print yvalue\n nums.append(yvalue)\n\n def listHorizontalLine(self, x):\n nums = []\n for i in range(WIDTH):\n value = self.matrix[x][i]\n if (value != FILL):\n nums.append(value)\n return nums\n\n def listVerticalLine(self, y):\n nums = []\n for i in range(WIDTH):\n value = self.matrix[i][y]\n if (value != FILL):\n nums.append(value)\n return nums\n\n def findSquare(self, x, y):\n nums =[]\n for b in borders:\n for bo 
            for bo in borders:\n                if x <= b and y <= bo:\n                    nums = [b, bo]\n                    return nums\n        return False\n\n    def matrixSquare(self,coordinate):\n        z = coordinate[0]+1\n        a = coordinate[1]+1\n        x = coordinate[0]-2\n        y = coordinate[1]-2\n        threeLine = []\n        nums = []\n        for i in range(x, z):\n            for j in range(y, a):\n                threeLine.append(self.matrix[i][j])\n            nums.append(threeLine)\n            threeLine = []\n        return nums\n\n    def listSquare(self, coordinate):\n        z = coordinate[0]+1\n        a = coordinate[1]+1\n        x = coordinate[0]-2\n        y = coordinate[1]-2\n        threeLine = []\n        nums = []\n        for i in range( x, z):\n            for j in range(y, a):\n                value = self.matrix[i][j]\n                if (value != FILL):\n                    nums.append(value)\n        return nums\n\n    def search(self, x, y):\n        horizontal = set(self.listHorizontalLine(x))\n        vertical = set(self.listVerticalLine(y))\n        square = set(self.listSquare(self.findSquare(x,y)))\n        result = (horizontal | vertical | square)\n        numbers = FULLSUDOKU - result\n        if len(numbers) == 1:\n            number = numbers.pop()\n            return number\n        return False\n\n\n    def startSolve(self, s):\n        for i in range(WIDTH):\n            for j in range(WIDTH):\n                if not s.isNumber(i, j):\n                    k = s.search(i , j)\n                    if not k:\n                        print \"\"\n                    else:\n                        s.setNumber(i , j , k)\n" }, { "alpha_fraction": 0.567669153213501, "alphanum_fraction": 0.5827067494392395, "avg_line_length": 23.18181800842285, "blob_id": "51992971ab8d180470fcbe2c17b995454da37dba", "content_id": "a1dd7033415bc692f2945e8bd7894e79a097320a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 36, "num_lines": 11, "path": "/setup.py", "repo_name": "JorisHeirman/PySudoku", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\nsetup(name='sudoku',\n      version='0.0.0.1',\n      description='sudoku packages',\n      url='http://trainr.be',\n      author='Trainr',\n      author_email='[email protected]',\n      license='MIT',\n      packages=['sudoku'],\n      zip_safe=False)\n" }, { "alpha_fraction": 0.4588492214679718, "alphanum_fraction": 0.4679533839225769, "avg_line_length": 26.73737335205078, "blob_id": "6fe5fcd281e18492ae0e330a133089dc3ad31adc", "content_id": "e95ab18209b1dadc111fa341576a374580a01880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2746, "license_type": "no_license", "max_line_length": 67, "num_lines": 99, "path": "/sudoku/Sudoku.py", "repo_name": "JorisHeirman/PySudoku", "src_encoding": "UTF-8", "text": "import json\n\nWIDTH = 9\nFULLSUDOKU = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nFILE = 'sudoku1.json'\nFILL = \"x\"\nmatrix = [[FILL]*WIDTH for i in range(9)]\nborders = [2,5,8]\ncounter = 0\n\nclass Sudoku:\n\n    def __init__(self):\n        self.matrix = [[FILL]*WIDTH for i in range(WIDTH)]\n        with open(FILE) as data_file:\n            json_object = json.load(data_file)\n            for key in json_object:\n                value = json_object[key]\n                for v in value:\n                    self.setNumber(int(key), int(v), int(value[v]))\n\n    def printCanvas(self):\n        for i in range(WIDTH):\n            print \"\"\n            for j in range(WIDTH):\n                print self.matrix[i][j],\n\n    def setNumber(self, x, y, number):\n        self.matrix[x][y] = number\n\n    def isNumber(self , x , y):\n        try:\n            value = self.matrix[x][y]\n            if value == FILL:\n                return False\n            else:\n                return True\n        except Exception as e:\n            return False\n\n    def listHorizontalLine(self, x):\n        nums = []\n        for i in range(WIDTH):\n            value = self.matrix[x][i]\n            if (value != 
FILL):\n nums.append(value)\n return nums\n\n def findSquare(self, x, y):\n nums =[]\n for b in borders:\n for bo in borders:\n if x <= b and y <= bo:\n nums = [b, bo]\n return nums\n return False\n\n\n def listSquare(self, coordinate):\n z = coordinate[0]+1\n a = coordinate[1]+1\n x = coordinate[0]-2\n y = coordinate[1]-2\n nums = []\n for i in range( x, z):\n for j in range(y, a):\n value = self.matrix[i][j]\n if (value != FILL):\n nums.append(value)\n return nums\n\n def search(self, x, y):\n horizontal = set(self.listHorizontalLine(x))\n vertical = set(self.listVerticalLine(y))\n square = set(self.listSquare(self.findSquare(x,y)))\n result = (horizontal | vertical | square)\n numbers = FULLSUDOKU - result\n if len(numbers) == 1:\n number = numbers.pop()\n return number\n return False\n\n def startSolve(self):\n for i in range(WIDTH):\n for j in range(WIDTH):\n if not self.isNumber(i, j):\n k = self.search(i , j)\n if not k:\n print \"\"\n else:\n self.setNumber(i , j , k)\n" } ]
4
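`Sudoku.__init__` in the record above reads `sudoku1.json` as a mapping of row index to `{column: digit}`. The fragment below shows that inferred shape; the actual puzzle file is not part of the record, so the givens here are illustrative.

```python
import json

# Inferred shape of sudoku1.json, based on how Sudoku.__init__ above walks
# it: outer keys are row indices, inner keys are column indices, values are
# the given digits. The digits below are illustrative, not the real puzzle.
example = '{"0": {"0": 5, "4": 7}, "1": {"1": 3, "8": 9}, "4": {"4": 1}}'

for row, cols in json.loads(example).items():
    for col, digit in cols.items():
        print(int(row), int(col), int(digit))  # mirrors the setNumber calls
```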
spydir/mlaz
https://github.com/spydir/mlaz
25d7c145d361ea27dd89c3503aed07de7e9f05f8
b793de3c4b86d83604bbdcf39e4224fa40cb67f9
4de2742a8438c749ecd8edbacbce96c8d9206cb1
refs/heads/master
2020-06-08T11:52:36.977932
2019-07-23T12:19:26
2019-07-23T12:19:26
193,224,032
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7101321816444397, "alphanum_fraction": 0.7224669456481934, "avg_line_length": 27.399999618530273, "blob_id": "991950114a53f770ae59de93d010b12f50e8800f", "content_id": "3f92f08576ba25d9e6efe2f90c15ac30b493bf01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1135, "license_type": "no_license", "max_line_length": 85, "num_lines": 40, "path": "/data_preprocessing_template.py", "repo_name": "spydir/mlaz", "src_encoding": "UTF-8", "text": "# data preprocessing\n\n# import libraries\n# import numpy\n# import matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# import dataset\ndataset = pd.read_csv('data.csv')\nx = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 3].values\n\n\n# # taking care of missing data\n# from sklearn.impute import SimpleImputer\n# imputer = SimpleImputer()\n# imputer = imputer.fit(x[:, 1:3])\n# x[:, 1:3] = imputer = imputer.transform(x[:, 1:3])\n\n\n# # encoding categorical data\n# from sklearn.preprocessing import LabelEncoder, OneHotEncoder\n# labelencoder_x = LabelEncoder()\n# x[:,0] = labelencoder_x.fit_transform(x[:,0])\n# onehotencoder = OneHotEncoder(categorical_features=[0])\n# x = onehotencoder.fit_transform(x).toarray()\n# labelencoder_y = LabelEncoder()\n# y = labelencoder_y.fit_transform(y)\n\n\n# splitting data into the training and test sets.\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.2,random_state=0)\n\n# # feature scaling\n# from sklearn.preprocessing import StandardScaler\n# sc_x = StandardScaler()\n# x_train = sc_x.fit_transform(x_train)\n# x_test = sc_x.transform(x_test)" } ]
1
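The commented-out encoding block in the template above relies on `OneHotEncoder(categorical_features=[0])`, an argument scikit-learn removed in version 0.22. A sketch of the same step with `ColumnTransformer` follows; column index 0 matches the template, and `x` and `y` are the template's arrays.

```python
# Sketch: the categorical-encoding step from data_preprocessing_template.py
# above, rewritten for scikit-learn >= 0.22, where the categorical_features
# argument of OneHotEncoder no longer exists. Column 0 matches the template;
# x and y are the arrays the template builds from data.csv.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import LabelEncoder, OneHotEncoder

ct = ColumnTransformer([("encoder", OneHotEncoder(), [0])],
                       remainder="passthrough")
x = ct.fit_transform(x)              # one-hot encode the first column
y = LabelEncoder().fit_transform(y)  # encode the target labels
```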
aarushraj3/Geo_creation
https://github.com/aarushraj3/Geo_creation
61fff1717258a53733be96e336840df668558c0c
4342eee0ea1954b29af2b8fdec107306d2c6cb04
a318a33ad38249f36e58fa8e50d00ba29447c98e
refs/heads/master
2016-06-14T11:28:56.264404
2016-05-12T15:19:30
2016-05-12T15:19:30
58,652,707
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6070806980133057, "alphanum_fraction": 0.622170627117157, "avg_line_length": 23.614286422729492, "blob_id": "8b2e5e8993d3f55826f36fbf173948b694e1494d", "content_id": "96ebc040f8b2133ec3c32726b39f739c0ffc5732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1723, "license_type": "no_license", "max_line_length": 107, "num_lines": 70, "path": "/new.py", "repo_name": "aarushraj3/Geo_creation", "src_encoding": "UTF-8", "text": "import json\nfrom py2neo import Node, Relationship, Graph, authenticate\nimport random, string\n#(give your \"localhost/neo4j username/ neo4j password\")\nauthenticate(\"localhost:7474\",\"neo4j\",\"scibase\")\ng=Graph(\"http://localhost:7474/db/data\")\nwith open('geophysicaldata.json') as data_file:\n d = json.load(data_file)\n\ncity=[]\ncountry={}\n\ndef printd():\n\t\n\tfor cntnt, cntnt_v in d.items():\n\t\ttx=g.begin()\t\t\n\t\tprint (cntnt)\n\t\t#cntnt1=cntnt\n\t\tcntnt = Node(\"Continent\", Name=cntnt )\n\t\ttx.create(cntnt)\n\t\ttx.commit()\n\t\tfor cntry,cntry_v in cntnt_v.items():\n\t\t\ttx=g.begin()\t\t\t\n\t\t\tcntry1=cntry\n\t\t\tcntry = Node(\"Country\",Name=cntry )\n\t\t\trel=Relationship(cntry, \"part_of\", cntnt)\n\t\t\ttx.create(rel)\n\t\t\ttx.commit()\n\t\t\tcity= cntry_v[\"cities\"]\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\ttx=g.begin()\n\t#\t\tcty = \"MATCH (n:Country {Name:'\"+cntry+\") MERGE (m:City {Name:{'\"+B+\"'}) CREATE (m)-[:located_in]->(n)\"\n\t\t\tfor i in range(len(city)-1):\t\n\t\t\t\tif city[i] != \"\":\n\t\t\t\t\tcty = Node(\"City\",Name=city[i])\n\t\t\t\t\ttx.create(cty)\n\t\t\ttx.commit()\n\n\n\t\t\tfor i in range(len(city)-1):\t\n\t\t\t\trel=Relationship(city[i], \"located_in\", cntry)\n\t\t\t\ttx.create(rel)\n\t\t\t\n\t\t\ttx.commit()\n\t\t\t\t\t\n'''\t\t\t\tcntnt= cntnt_k\n\t\t\t\tcntnt1=cntnt\n\t\t\t\tcntnt = graph.merge_one(\"Continent\", \"Name\",cntnt1 )\n\t\t\t\thelp1(cntnt)\n\t\t\t\tprintd(cntnt_v)\n\t\t\t\tcntry= cntnt_k\n\t\t\t\tcntry1=cntry\n\t\t\t\tcntry = graph.merge_one(\"Country\", \"Name\",cntry1 )\n\t\t\t\tgraph.create_unique(Relationship(cntry, \"part_of\", name1))\n\t\t\t\thelp2(cntry)\t\t\t\t\n\t\t\t\tprintd(cntnt_v)\n\t\telif isinstance(cntnt_v, list):\n\t\t\ttx=graph.cypher.begin()\t\t\t\n\t\t\tfor val in cntnt_v:\n\t\t\t\tcty= val\n\t\t\t\tcty1=cty\n\t\t\t\tcty = tx.graph.merge_one(\"City\", \"Name\",cty1 )\n\t\t\t\ttx.graph.create_unique(Relationship(cty, \"located_in\", name2))\n\t\t\ttx.commit()'''\n\n\nprintd()\ntx.commit()\n" } ]
1
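The loader above creates new nodes on every run, so re-running it duplicates the graph. A hedged sketch of an idempotent variant using Cypher MERGE, assuming py2neo v3's Graph.run and the Neo4j 3.0-era {param} placeholder syntax; the function name load_idempotent and the reuse of `g` and `d` from the script are the only things carried over:

# Sketch only: MERGE matches existing nodes instead of duplicating them.
def load_idempotent():
    for cntnt_name, cntnt_v in d.items():
        g.run("MERGE (:Continent {Name: {name}})", name=cntnt_name)
        for cntry_name, cntry_v in cntnt_v.items():
            g.run("MATCH (c:Continent {Name: {cont}}) "
                  "MERGE (n:Country {Name: {name}}) "
                  "MERGE (n)-[:part_of]->(c)",
                  cont=cntnt_name, name=cntry_name)
            for city_name in cntry_v["cities"]:
                if city_name:
                    g.run("MATCH (n:Country {Name: {country}}) "
                          "MERGE (m:City {Name: {city}}) "
                          "MERGE (m)-[:located_in]->(n)",
                          country=cntry_name, city=city_name)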
philkim72/DeepShack
https://github.com/philkim72/DeepShack
c7471fed6529f0b68b9850f9f21c137bdb9a74a2
2ae41a3c7f158c1f2614301113e18ecb01e222d9
0c3e9a394675b0499992965e7a6ea0ab3f30e517
refs/heads/poc
2020-05-15T19:45:47.103590
2019-04-29T00:19:16
2019-04-29T00:19:16
182,464,774
1
0
null
2019-04-20T23:43:55
2019-04-28T23:30:50
2019-04-29T00:00:40
Jupyter Notebook
[ { "alpha_fraction": 0.5563036799430847, "alphanum_fraction": 0.576967716217041, "avg_line_length": 35.193275451660156, "blob_id": "e141b3ec63196e24a71710275d3a2bf7b9704e8a", "content_id": "72057aee291ee0f09e0ec1ebf63b200f0a4b2c0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4307, "license_type": "no_license", "max_line_length": 85, "num_lines": 119, "path": "/train/model.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "import abc\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.layers import (Activation, BatchNormalization, Conv2D,\n Dense, Dropout, Flatten, Input,\n MaxPooling2D, concatenate)\nfrom tensorflow.keras.models import Model, Sequential, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.utils import plot_model\n\n\nclass BaseModel(metaclass=abc.ABCMeta):\n def __init__(self, input_shape, name, image_dir='.',\n existing_model_path=None):\n self.input_shape = input_shape\n self.name = name\n self.image_dir = image_dir\n\n if existing_model_path:\n self.model = load_model(existing_model_path)\n else:\n self.model = self.create_model()\n\n plot_model(self.model, show_shapes=True,\n to_file=f'{image_dir}/results/{name}.png')\n self.model.summary()\n\n @abc.abstractmethod\n def create_model(self):\n pass\n\n def train(self, x_train, y_train, epochs=30, batch_size=256,\n lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.001):\n\n fn_base = f'{self.image_dir}/results/{self.name}'\n\n opt = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon, decay=decay)\n self.model.compile(optimizer=opt, loss='mse')\n\n mc = ModelCheckpoint(fn_base + '.h5', save_best_only=True, mode='min')\n self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,\n validation_split=0.1, verbose=1, callbacks=[mc])\n self.model.save(fn_base + '_final.h5')\n\n df = pd.DataFrame.from_dict(self.model.history.history)\n df.to_csv(fn_base + '_history.csv', index=False)\n\n # Plot results\n fig, ax = plt.subplots(figsize=(8, 6))\n df.plot(y='loss', kind='line', ax=ax)\n df.plot(y='val_loss', kind='line', ax=ax)\n\n def evaluate(self, x_test, y_test):\n test_score = self.model.evaluate(x_test, y_test)\n print('Train score:', self.model.history.history['loss'][-1])\n print('Test score:', test_score)\n\n\nclass MultiScaleCNN(BaseModel):\n def _msb(self, filters):\n \"\"\"Multi-Scale Blob\"\"\"\n def func(inputs, bn=False):\n params = {'activation': 'relu', 'padding': 'same',\n 'kernel_regularizer': l2(5e-4)}\n outputs = concatenate([Conv2D(filters, 9, **params)(inputs),\n Conv2D(filters, 7, **params)(inputs),\n Conv2D(filters, 5, **params)(inputs),\n Conv2D(filters, 3, **params)(inputs)])\n if bn:\n outputs = BatchNormalization()(outputs)\n outputs = Activation('relu')(outputs)\n return outputs\n\n return func\n\n def create_model(self):\n \"\"\"multi-scale convolutional neural network\"\"\"\n inputs = Input(shape=self.input_shape)\n\n # Feature Remap\n outputs = Conv2D(filters=64, kernel_size=9,\n activation='relu', padding='same')(inputs)\n\n # Multi-scale Feature\n outputs = self._msb(4 * 16)(outputs)\n outputs = MaxPooling2D()(outputs)\n\n # Multi-scale Feature\n outputs = self._msb(4 * 32)(outputs)\n outputs = self._msb(4 * 32)(outputs)\n outputs = MaxPooling2D()(outputs)\n\n # Multi-scale Feature\n outputs = self._msb(3 * 64)(outputs)\n outputs = 
self._msb(3 * 64)(outputs)\n\n # Density Map Regression\n outputs = Conv2D(filters=1000, kernel_size=1, activation='relu',\n kernel_regularizer=l2(5e-4))(outputs)\n outputs = Conv2D(filters=1, kernel_size=1, activation='relu')(outputs)\n\n model = Model(inputs=inputs, outputs=outputs)\n return model\n\n\nclass FullyConnected(BaseModel):\n def create_model(self):\n model = Sequential([\n MaxPooling2D(input_shape=self.input_shape),\n Flatten(),\n Dropout(0.5),\n Dense(512, activation='relu'),\n Dropout(0.5),\n Dense(1, activation='relu')\n ])\n return model\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 13, "blob_id": "c6572cdc35f1ad4d677c4e017c69caeb1b8050a1", "content_id": "23197bb499d2659730d8c2c655d6b9e3aeafa3b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 28, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/predict_service/README.rst", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "predict_service\n===========\n" }, { "alpha_fraction": 0.7072098255157471, "alphanum_fraction": 0.7250879406929016, "avg_line_length": 47.74285888671875, "blob_id": "78c1b04303838564702c9c7ddbba51f7f700988f", "content_id": "ff8bba985fe3bb5b1783d6c52b37643bfa74ec4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3412, "license_type": "no_license", "max_line_length": 430, "num_lines": 70, "path": "/README.rst", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": ".. image:: images/Deep-Shack.png\n :height: 100px\n :width: 200 px\n :scale: 50 %\n :align: center\n\n================\nDeepShack\n================\n---------------------------------------------------\nVisit Shake Shack with the help of Deep Learning!\n---------------------------------------------------\n\nDescription\n=============\n\n\nShake Shack is lunchtime (and dinnertime) favorite for people in Manhattan. The original location was opened in July 2004 in Madison Square Park. Its popularity is such that in the summer the wait in line for service can stretch to over an hour, especially on weekends when the weather is pleasant. A webcam on the restaurant's web page shows the current line in real time in order to inform customers of the length of the line.\n\n.. image:: images/shakeshack-1500848940.jpg\n :width: 200 px\n :align: center\n\n\nSo to ease the decision making process on whether to visit Shake Shack at any given time of the day, our service aims to notify subscribers of the number of people in line at the Madison Square Park location though Deep Learning. Either through an SMS or email, the service aims to notify the subscriber of the number of people waiting in line.\n\nThere are two main use cases or the service:\n\n- An email is sent at a user-determined scheduled time(s) during the day with a count of the number people in line.\n- A user sends a SMS message to the service, and will receive a count of the number of people in line.\n\nMVP Architecture\n================\n\n\n.. image:: images/MVP_Architecture.png\n :width: 200 px\n :align: center\n\nThere are 3 services which will be implemented on AWS Lamba. This design decouples each service and allows for a serverless architecture.\n\n- Scraper Service will send a GET request to the ShackCam and save the image onto an AWS S3 bucket, which creates an event.\n- Predict Service will listen to the event and load the model from AWS S3. 
It will estimate the actual crowd count using the trained model, then publish a message to AWS SNS (Simple Notification Service). Using the annotated ShackCam image set, we will fine-tune the model using transfer learning and save the model on S3, which will be used by the service.\n- Email/SMS Service will subscribe to the message and send an email or text with the predicted count.\n\n\n\nProject Requirements\n====================\n* The Service Architecture will satisfy the following requirements.\n\t- More than one service - We will have a total of 4 services.\n\t- Inter-service communication - We will use AWS SNS (Pub/Sub Messaging Service) to communicate between Predict and Email/SMS services.\n\t- We will use GitHub to manage and merge the code base.\n\t- We will use JIRA to manage tasks.\n\t- We will use Standups and Retrospectives in order to seek transparency and open collaboration in an effort to discover bottlenecks, resolve issues, focus on flow and continuously improve our process.\n* The Service Architecture will contain the following additional features.\n\t- Train and Use a Model\n\t- Present Data (Email/SMS)\n\t- Event Driven Service\n\n\nAuthors and acknowledgment\n==========================\n\nTeam Members:\nE.K. Itoku (ii2155), Oscar Jasklowski (ovj2101), Phillip Kim (ppk2003), Ivan Ugalde (du2160), Sean Xu (cx2118)\n\nSpecial thanks to Dmitri (Dimroc) for inspiring the machine learning piece and sharing annotated images for the training data set.\n\nhttps://blog.dimroc.com/2017/11/19/counting-crowds-and-lines\n" }, { "alpha_fraction": 0.6164233684539795, "alphanum_fraction": 0.6507299542427063, "avg_line_length": 36.27891159057617, "blob_id": "66114d163f51e238f2d5aed6a05e8deac6b62833", "content_id": "edfc7a3eaef94228743837fd4aaaa74fbb030522", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5480, "license_type": "no_license", "max_line_length": 148, "num_lines": 147, "path": "/predict_service/density.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "# from crowdcount.ml.callbacks import DensityCheckpoint\n# from crowdcount.ml.generators import density as generator\n# from crowdcount.models import paths as ccp\nfrom tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard\nfrom tensorflow.keras.initializers import RandomNormal\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Input, average\nfrom tensorflow.keras.models import Sequential, load_model, Model as KModel\n# import attr\n# import crowdcount.ml as ml\nimport tensorflow.keras.optimizers\nimport os\n\n_msb_initializer = RandomNormal(stddev=0.01)\n\n\ndef train(model_path=None):\n    model = _create_model(model_path)\n    initial_epoch = ml.fetch_epoch(model_path)\n    print(model.summary())\n\n    model.fit_generator(generator.training(),\n                        generator.steps_per_epoch(),\n                        initial_epoch=initial_epoch,\n                        epochs=200 - initial_epoch,\n                        verbose=1,\n                        validation_data=generator.validation(),\n                        validation_steps=generator.validation_steps(),\n                        callbacks=_create_callbacks())\n\n    test(model)\n\n\ndef test(model=None, model_path=None):\n    if not model:\n        model = _create_model(model_path)\n    score = model.evaluate_generator(generator.validation(), steps=generator.validation_steps())\n    print('Test loss:', score[0])\n    print('Test accuracy:', score[1])\n\n\n# @attr.s is commented out, so use a plain initializer instead of relying on\n# attrs' generated __init__/__attrs_post_init__ hooks.\nclass Model:\n    def __init__(self, weights):\n        self.weights = weights\n        self.model = _create_model(self.weights)\n\n    def predict(self, image_array):\n        return self.model.predict(ml.image_to_batch(image_array), batch_size=1)\n\n    def summary(self):\n        return self.model.summary()\n\n\ndef predict(image_array, model_path):\n    return Model(model_path).predict(image_array)\n\n\ndef _create_model(model_path=None):\n    if model_path:\n        print(\"Loading model for epoch {} from {}\".format(ml.fetch_epoch(model_path), model_path))\n        return load_model(model_path)\n\n    return _create_msb_model()\n\n\ndef _create_multicol_model():\n    inputs = Input(shape=(None, None, 3))\n    cols = [_create_column(d, inputs) for d in [3, 5, 9]]\n    model = KModel(inputs=inputs, outputs=average(cols))\n    return model\n#    return _compile_model(model)\n\n\ndef _create_column(kernel_dimension, inputs):\n    kd = kernel_dimension\n    x = Conv2D(36, kernel_size=(kd, kd), activation='relu', padding='same')(inputs)\n    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)\n    x = Conv2D(72, (kd, kd), activation='relu', padding='same')(x)\n    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)\n    x = Conv2D(36, (kd, kd), activation='relu', padding='same')(x)\n    if kd == 9:\n        kd = 7\n    x = Conv2D(24, (kd, kd), activation='relu', padding='same')(x)\n    x = Conv2D(16, (kd, kd), activation='relu', padding='same')(x)\n    return Conv2D(1, (1, 1), activation='relu', kernel_initializer='random_normal')(x)\n\n\ndef _create_msb_model():\n    \"\"\"\n    Multiscale CNN for crowd counting:\n    https://arxiv.org/pdf/1702.02359.pdf\n    \"\"\"\n    inputs = Input(shape=(None, None, 3))\n    x = Conv2D(64, kernel_size=(9, 9), activation='relu', padding='same', kernel_initializer=_msb_initializer)(inputs)\n    x = _create_msb(16, [9, 7, 5, 3], x)\n    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)\n    x = _create_msb(32, [9, 7, 5, 3], x)\n    x = _create_msb(32, [9, 7, 5, 3], x)\n    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)\n    x = _create_msb(64, [7, 5, 3], x)\n    x = _create_msb(64, [7, 5, 3], x)\n    x = Conv2D(1000, (1, 1), activation='relu', kernel_initializer=_msb_initializer)(x)\n    x = Conv2D(1, (1, 1), activation='relu', kernel_initializer=_msb_initializer)(x)\n    model = KModel(inputs=inputs, outputs=x)\n    return model\n#    return _compile_model(model)\n\n\ndef _create_msb(filters, dimensions, inputs):\n    \"\"\"\n    Multi-scale Blob as described in https://arxiv.org/pdf/1702.02359.pdf\n    \"\"\"\n    cols = [Conv2D(filters, kernel_size=(d, d), activation='relu', padding='same', kernel_initializer=_msb_initializer)(inputs) for d in dimensions]\n    return average(cols)\n\n\ndef _create_congested_fcn():\n    \"\"\"\n    Based on the model proposed by Fully Convolutional Crowd Counting On Highly Congested Scenes:\n    https://arxiv.org/pdf/1612.00220.pdf\n    \"\"\"\n    model = Sequential()\n    model.add(Conv2D(36, kernel_size=(9, 9), activation='relu', input_shape=(None, None, 3), padding='same'))\n    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Conv2D(72, (7, 7), activation='relu', padding='same'))\n    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n    model.add(Conv2D(36, (7, 7), activation='relu', padding='same'))\n    model.add(Conv2D(24, (7, 7), activation='relu', padding='same'))\n    model.add(Conv2D(16, (7, 7), activation='relu', padding='same'))\n    model.add(Conv2D(1, (1, 1), padding='same', kernel_initializer='random_normal'))\n    return model\n\n\ndef _compile_model(model):\n    # The module is imported as tensorflow.keras.optimizers above, so the Adam\n    # class must be referenced through that name rather than a bare `keras`.\n    model.compile(loss='mean_squared_error',\n                  optimizer=tensorflow.keras.optimizers.Adam(lr=1e-5, decay=5e-5),\n                  metrics=['mae', 'mse', 'accuracy'])\n    return model\n\n\ndef _create_callbacks():\n    os.makedirs(ccp.output('weights'), exist_ok=True)\n    return
[CSVLogger(ccp.output('keras_history.csv'), append=True),\n            ModelCheckpoint(ccp.output(\"weights/weights.{epoch:02d}-{val_loss:.2f}.hdf5\")),\n            TensorBoard(log_dir=ccp.output('tensorboard')),\n            DensityCheckpoint(\"data/shakecam/shakeshack-1504543773.jpg\")]\n" }, { "alpha_fraction": 0.612552285194397, "alphanum_fraction": 0.6217573285102844, "avg_line_length": 25.55555534362793, "blob_id": "4618facb8214749443c8abb18a10df03ad7f71f3", "content_id": "aa3315194e9a5db73d62064e600c5460ca21e6b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1195, "license_type": "no_license", "max_line_length": 64, "num_lines": 45, "path": "/scraper_service/main.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "from datetime import datetime\nfrom time import sleep\nfrom botocore.vendored import requests\nfrom fake_useragent import UserAgent\nimport boto3\n\n\ndef make_request():\n    url = 'http://cdn.shakeshack.com/camera.jpg'\n    headers = {'user-agent': UserAgent().random}\n    r = requests.get(url, stream=True, headers=headers)\n    if r.headers['content-length'] == '0':\n        raise ValueError('No image was loaded, trying again...')\n    else:\n        return r\n\n\ndef scrape_handler(event, context):\n    session = boto3.Session()\n    s3 = session.resource('s3')\n\n    # Try up to 3 times to load an image, stopping at the first success\n    r = None\n    for x in range(0, 3):\n        try:\n            r = make_request()\n            break\n        except ValueError as err:\n            print(err)\n            sleep(5)\n    if r is None:\n        raise ValueError('No image could be loaded after 3 attempts')\n\n    # Create image filename\n    timestamp = datetime.now()\n    timestamp_str = timestamp.strftime('%Y-%m-%d_%H%M-%S')\n    filename = f'shackcam/{timestamp_str}.jpg'\n\n    # Upload image to S3\n    bucket_name = 'deepshack'\n    bucket = s3.Bucket(bucket_name)\n    r.raw.decode_content = True\n    bucket.upload_fileobj(r.raw, filename)\n\n    return {\n        'image size (bytes)': r.headers['content-length'],\n        'filename': filename\n    }\n" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.5647059082984924, "avg_line_length": 24.75757598876953, "blob_id": "211fac495f6af8c0276ae5a1df93c82d57930d39", "content_id": "21e01ab13ac44f191171c08d74eedde9ae768a37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 850, "license_type": "no_license", "max_line_length": 68, "num_lines": 33, "path": "/sms_service/main.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "import json\nimport boto3\nimport os\n\n# Initialize SNS client (region must match where the topic lives)\nsession = boto3.Session(\n    region_name=\"us-east-1\"\n)\nsns_client = session.client('sns')\n\ndef lambda_handler(event, context):\n\n    # json.loads, not json.load: the SNS message arrives as a string\n    event_message = json.loads(event['Records'][0]['Sns']['Message'])\n    pred = event_message['prediction']\n    filename = event_message['filename']\n\n    # Send SMS\n    response = sns_client.publish(\n        PhoneNumber=os.environ['ivan_number'],\n        Message='There are only '+str(pred)+' people in the line',\n        MessageAttributes={\n            'AWS.SNS.SMS.SenderID': {\n                'DataType': 'String',\n                'StringValue': 'DeepShack'\n            },\n            'AWS.SNS.SMS.SMSType': {\n                'DataType': 'String',\n                'StringValue': 'Promotional'\n            }\n        }\n    )\n\n    return 'OK'\n" },
"/train/image_handlers.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "import json\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport cv2\n\n\nclass ImageLoader(object):\n def __init__(self, image_dir=None, new_shape=224, gaussian_ksize=15):\n self.org_shape = None\n self.new_shape = (new_shape, new_shape)\n self.gaussian_ksize = (gaussian_ksize, gaussian_ksize)\n self.image_dir = image_dir\n self.c = 4\n self.data = OrderedDict()\n\n def _read_annotations(self, filepath):\n \"\"\"Read annotation from a JSON\"\"\"\n with open(filepath, 'r') as f:\n annots = json.load(f)\n\n return annots\n\n def _read_image(self, filepath):\n \"\"\"\n Read an image, reshape to self.new_shape, then divide by 255\n \"\"\"\n org_img = cv2.imread(filepath)\n new_img = cv2.resize(org_img, self.new_shape)\n self.org_shape = org_img.shape\n return new_img / 255\n\n def _read_gaussian_image(self, annots):\n \"\"\"\n Create a (x, y, 1) dimension image by applying Gaussian kernel\n Annotations are (x, y) but numpy pixels are (y, x).\n \"\"\"\n new_shape = self.new_shape[0]//self.c, self.new_shape[1]//self.c\n img = np.zeros(new_shape)\n y_scaler = new_shape[0]/self.org_shape[0]\n x_scaler = new_shape[1]/self.org_shape[1]\n\n scaled_annots = []\n for x, y in annots:\n x_scaled = int(round(x * x_scaler))\n y_scaled = int(round(y * y_scaler))\n\n # Discard annotations that are out of the frame\n if x_scaled < new_shape[0] and y_scaled < new_shape[1]:\n img[y_scaled, x_scaled] += 1\n scaled_annots.append((x_scaled, y_scaled))\n\n gimg = cv2.GaussianBlur(src=img, ksize=self.gaussian_ksize, sigmaX=0)\n gimg = np.expand_dims(gimg, axis=-1)\n return gimg, scaled_annots\n\n def load_train_data(self):\n \"\"\"Read annotations, images, and annotated Gaussian images\"\"\"\n # Load annotations\n dir_ = self.image_dir\n self.annots = self._read_annotations(f\"{dir_}/annotation.json\")\n\n # Load image and gaussian image\n for fn, org_annots in self.annots.items():\n org_img = self._read_image(f\"{dir_}/frames/{fn}\")\n gaussian_img, scaled_annots = self._read_gaussian_image(org_annots)\n self.data[fn] = {'org_img': org_img,\n 'gaussian_img': gaussian_img,\n 'org_annots': org_annots,\n 'scaled_annots': scaled_annots}\n\n def plot_image(self, i=None, filename=None):\n \"\"\"\n Plot an image and annotated image side by side.\n Either pass i or filename\n \"\"\"\n if isinstance(i, int):\n _, item = list(self.data.items())[i]\n elif filename:\n item = self.data[filename]\n else:\n raise ValueError('Pass either index or filename')\n\n org_img = item['org_img']\n gaussian_img = item['gaussian_img']\n scaled_annots = item['scaled_annots']\n\n figs, axes = plt.subplots(1, 3, figsize=(15, 5))\n\n # Original image\n axes[0].imshow(org_img)\n\n # Annotation\n img_array = (org_img*255).astype('uint8')\n img = Image.fromarray(img_array)\n draw = ImageDraw.Draw(img)\n for scaled_x, scaled_y in scaled_annots:\n draw.text((scaled_x*self.c, scaled_y*self.c), \"X\", fill=\"red\")\n axes[1].imshow(img)\n\n # Gaussian image, converting from 3D to 2D\n axes[2].imshow(gaussian_img[:, :, 0])\n\n @property\n def org_img(self):\n return np.array([v['org_img'] for v in self.data.values()])\n\n @property\n def gaussian_img(self):\n return np.array([v['gaussian_img'] for v in self.data.values()])\n\n @property\n def files(self):\n return self.data.keys()\n\n @property\n def count(self):\n return [len(v) for v in self.annots.values()]\n\n\nclass 
ShackCamLoader(ImageLoader):\n def __init__(self, image_dir=None, new_shape=224, gaussian_ksize=15):\n ImageLoader.__init__(self, image_dir, new_shape, gaussian_ksize)\n\n gaussian_shape = self.new_shape[0]//self.c, self.new_shape[1]//self.c\n mask = cv2.imread(f\"{self.image_dir}/line_mask.png\", 0) // 255\n mask = cv2.resize(mask, gaussian_shape)\n self.mask = (mask == 0)\n\n def mask_img(self, img):\n img = img.copy()\n img[self.mask] = 0\n return img\n\n @property\n def masked_gaussian_img(self):\n imgs = [self.mask_img(v['gaussian_img']) for v in self.data.values()]\n return np.array(imgs)\n\n def plot_image(self, i=None, filename=None):\n \"\"\"\n Plot an image and annotated image side by side.\n Either pass i or filename\n \"\"\"\n if isinstance(i, int):\n _, item = list(self.data.items())[i]\n elif filename:\n item = self.data[filename]\n else:\n raise ValueError('Pass either index or filename')\n\n org_img = item['org_img']\n gaussian_img = item['gaussian_img']\n scaled_annots = item['scaled_annots']\n\n figs, axes = plt.subplots(1, 4, figsize=(20, 5))\n\n # Original image\n axes[0].imshow(org_img)\n\n # Annotation\n img_array = (org_img*255).astype('uint8')\n img = Image.fromarray(img_array)\n draw = ImageDraw.Draw(img)\n for scaled_x, scaled_y in scaled_annots:\n draw.text((scaled_x*self.c, scaled_y*self.c), \"X\", fill=\"red\")\n axes[1].imshow(img)\n\n # Gaussian image, converting from 3D to 2D\n axes[2].imshow(gaussian_img[:, :, 0])\n\n axes[3].imshow(self.mask_img(gaussian_img[:, :, 0]))\n" }, { "alpha_fraction": 0.5941722989082336, "alphanum_fraction": 0.6368243098258972, "avg_line_length": 25.311111450195312, "blob_id": "31540c81d3f64f2b17475b04b6655d01c1896e70", "content_id": "fabe390a0969b00d0626dcca70cf38137291185f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2368, "license_type": "no_license", "max_line_length": 71, "num_lines": 90, "path": "/predict_service/main.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "import json\nimport random\nimport tempfile\n\nimport numpy as np\nimport boto3\nimport cv2\nfrom tensorflow.python.keras.models import load_model\n\n\nS3_BUCKET = 'deepshack'\nMSCNN_MODEL_PATH = 'model/shackcam_final.h5'\nFC_MODEL_PATH = 'model/shackcam_fc_final.h5'\nMASK_PATH = 'train/data/shackcam/line_mask.png'\n\n\ndef predict_handler(event, context):\n s3_message = event['Records'][0]['s3']\n\n message = {'prediction': random.randint(0, 10),\n 'filename': s3_message['object']['key']}\n\n sns = boto3.client('sns')\n response = sns.publish(\n TopicArn='arn:aws:sns:us-east-1:245636212397:dlresult',\n Message=json.dumps({'default': json.dumps(message)}),\n MessageStructure='json'\n )\n\n return {'statusCode': 200, 'body': json.dumps(message)}\n\n\ndef load_s3_object(key, func, **kwargs):\n # Load S3 object as byte string\n s3 = boto3.client('s3')\n obj = s3.get_object(Bucket=S3_BUCKET, Key=key)\n bytestr = obj['Body'].read()\n\n # Create a temp file and read it with the supplied function\n with tempfile.NamedTemporaryFile() as tmp:\n tmp.write(bytestr)\n data = func(tmp.name, **kwargs)\n\n return data\n\n\ndef transform_image(img, new_shape):\n \"\"\"Crop, resize, mean normalize, and change from 3D to 4D\"\"\"\n img = img[0:720, 0:720]\n img = cv2.resize(img, (new_shape, new_shape)) / 255\n img = np.expand_dims(img, axis=0) # 3D to 4D\n return img\n\n\ndef mask_image(img):\n img = img[0, :, :, 0].copy() # 4D to 2D\n new_shape = img.shape[0:2]\n\n mask = 
load_s3_object(MASK_PATH, cv2.imread, flags=0)\n mask = cv2.resize(mask, new_shape) // 255\n mask = (mask == 0)\n img[mask] = 0\n\n img = np.expand_dims(img, axis=0) # (40, 40) to (1, 40, 40)\n img = np.expand_dims(img, axis=-1) # (1, 40, 40) to (1, 40, 40, 1)\n\n return img\n\n\ndef predict(filename):\n img = load_s3_object(filename, cv2.imread)\n gaussian = predict_mscnn(img)\n masked = mask_image(gaussian)\n count = predict_fc(masked)\n return int(round(count))\n\n\ndef predict_mscnn(img):\n model = load_s3_object(MSCNN_MODEL_PATH, load_model)\n new_shape = model.input_shape[1]\n\n img_4d = transform_image(img, new_shape)\n pred_4d = model.predict(img_4d)\n return pred_4d\n\n\ndef predict_fc(img):\n model = load_s3_object(FC_MODEL_PATH, load_model)\n pred = model.predict(img)\n return pred[0][0]\n" }, { "alpha_fraction": 0.5990037322044373, "alphanum_fraction": 0.6683270931243896, "avg_line_length": 33.91304397583008, "blob_id": "73740c976e173dabc755c71f84810ab627ad6b45", "content_id": "e0c36de2cf6d7df3da5ad2b21d73aaf015c1803c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2409, "license_type": "no_license", "max_line_length": 392, "num_lines": 69, "path": "/email_service/README.rst", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "=============\nemail_service\n=============\nThis function will read the messages sent from the predict_service and send an\nemail to the address stored in the environment variable `gmail_username`.\n\n***************\nDependencies\n***************\nThis function depends on the following packages:\n\n- json\n- os\n- boto3\n- smtplib\n\n***************\nExecution\n***************\nAWS services provides the functionality to trigger notifications to different services like email or SMS via its Simple Notification Services (AWS SNS) by creating a topic (a group of services) and publishing to it. AWS SNS will send the message published to each of the services subscribed to that specific topic. The predict_service publishes to the topic where this function is subscribed.\n\nUpon execution, this function follows these steps:\n\n1. Creates a SMTP protocol client\n\n2. Logs in using **gmail_username** and **gmail_username** environment variables\n\n3. Send email\n\nExample\n^^^^^^^^^^^^^^^^^^\n\n- **predict_service** will publish a message similar to this:\n\n.. 
code-block:: JSON\n\n    {\n        \"Records\": [\n            {\n                \"EventSource\": \"aws:sns\",\n                \"EventVersion\": \"1.0\",\n                \"EventSubscriptionArn\": \"arn:aws:sns:us-east-1:245636212397:dlresult:b2a4ea7f-a427-41c3-8283-f27707aa0929\",\n                \"Sns\": {\n                    \"Type\": \"Notification\",\n                    \"MessageId\": \"3851a2b7-c171-5918-963a-f78a135502a0\",\n                    \"TopicArn\": \"arn:aws:sns:us-east-1:245636212397:dlresult\",\n                    \"Subject\": \"None\",\n                    \"Message\": {\n                        \"prediction\": 6,\n                        \"filename\": \"2019-04-25_2323-04.jpg\"\n                    },\n                    \"Timestamp\": \"2019-04-27T03:43:32.490Z\",\n                    \"SignatureVersion\": \"1\",\n                    \"Signature\": \"DxzOXWDeW7TaGPMmrnXjCDxzfUBB9q/su6FOY7BENXbzGFhnm1OthglqDxe1+oGlinD5mM87IoCBzNPN3Vu1lTNXJVoqTvBEwY8F0VwZknPZVXJT/uzsvE45YhR96GbNZimUBYMH7RGDKPh++5ONiPz2UOyzVukOJ2GiIMLIS+oe+i4h+4CiXjhSVXArJDeETkzfAd67s012qObR5ly37BQxyUXWkNaoA/umQorqwDVpvfftFsj7SVSuCbAhYzN4WhrIq63NwYzESi3YwfZ83PXw/abonzy1/9POAm+QMW3ttHyjk6bzcTCRYfe4Nu2uihYF9xYMvTc2ncT0LsMKsA==\",\n                    \"SigningCertUrl\": \"https://sns.us-east-1.amazonaws.com/SimpleNotificationService-6aad65c2f9911b05cd53efda11f913f9.pem\",\n                    \"UnsubscribeUrl\": \"https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:245636212397:dlresult:b2a4ea7f-a427-41c3-8283-f27707aa0929\",\n                    \"MessageAttributes\": {}\n                }\n            }\n        ]\n    }\n\n- email_service will read **prediction** and **filename** from there\n- It will send an email to the address stored in the environment variable **gmail_username**\n\nImprovements for future sprints\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n- Create logic to filter when messages are sent to email addresses\n- Send emails only to a list of subscribers as opposed to a single address\n" }, { "alpha_fraction": 0.6491228342056274, "alphanum_fraction": 0.6561403274536133, "avg_line_length": 27.5, "blob_id": "2fd7049c01a3f2d638fa1825239834a7c9124836", "content_id": "c87d9303bef77b7506eb1c791cc2586e5c4cd257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "no_license", "max_line_length": 71, "num_lines": 20, "path": "/email_service/main.py", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "import json\nimport os\nimport smtplib\n\n\ndef email_handler(event, context):\n    subject = \"Shack Alert\"\n\n    event_message = json.loads(event['Records'][0]['Sns']['Message'])\n    pred = event_message['prediction']\n    filename = event_message['filename']\n    body = f\"{filename} was scraped\\nPredicted value is {pred}\"\n    message = f\"Subject: {subject}\\n\\n{body}\"\n\n    s = smtplib.SMTP('smtp.gmail.com', 587)\n    s.starttls()\n    s.login(os.environ['gmail_username'], os.environ['gmail_password'])\n    s.sendmail(\"[email protected]\", \"[email protected]\", message)\n    s.quit()\n" }, { "alpha_fraction": 0.5999184250831604, "alphanum_fraction": 0.6688417792320251, "avg_line_length": 34.536231994628906, "blob_id": "705f3dd0222f7ec285b1b0fbf451fc6275a3fd64", "content_id": "cf83f363596ac8805b00dd64049df7bac0c24cf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 2452, "license_type": "no_license", "max_line_length": 392, "num_lines": 69, "path": "/sms_service/README.rst", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "============\nsms_service\n============\nThis function will read the messages sent from the predict_service and send an\nSMS message to the number stored in the environment variable `ivan_number`.\n\n***************\nDependencies\n***************\nThis
function depends on the following packages:\n\n- json\n- os\n- boto3\n\n***************\nExecution\n***************\nAWS provides the functionality to trigger notifications to different services like email or SMS via its Simple Notification Service (AWS SNS) by creating a topic (a group of services) and publishing to it. AWS SNS will send the message published to each of the services subscribed to that specific topic. The predict_service publishes to the topic where this function is subscribed.\n\nUpon execution, this function follows these steps:\n\n1. Creates a boto3 session for a specific (geographical) region\n\n2. With that session, it then creates a client for SNS\n\n3. Finally, it uses the client to publish the SMS message\n\nExample\n^^^^^^^^^^^^^^^^^^\n\n- predict_service will publish a message similar to this:\n\n.. code-block:: JSON\n\n    {\n        \"Records\": [\n            {\n                \"EventSource\": \"aws:sns\",\n                \"EventVersion\": \"1.0\",\n                \"EventSubscriptionArn\": \"arn:aws:sns:us-east-1:245636212397:dlresult:b2a4ea7f-a427-41c3-8283-f27707aa0929\",\n                \"Sns\": {\n                    \"Type\": \"Notification\",\n                    \"MessageId\": \"3851a2b7-c171-5918-963a-f78a135502a0\",\n                    \"TopicArn\": \"arn:aws:sns:us-east-1:245636212397:dlresult\",\n                    \"Subject\": \"None\",\n                    \"Message\": {\n                        \"prediction\": 6,\n                        \"filename\": \"2019-04-25_2323-04.jpg\"\n                    },\n                    \"Timestamp\": \"2019-04-27T03:43:32.490Z\",\n                    \"SignatureVersion\": \"1\",\n                    \"Signature\": \"DxzOXWDeW7TaGPMmrnXjCDxzfUBB9q/su6FOY7BENXbzGFhnm1OthglqDxe1+oGlinD5mM87IoCBzNPN3Vu1lTNXJVoqTvBEwY8F0VwZknPZVXJT/uzsvE45YhR96GbNZimUBYMH7RGDKPh++5ONiPz2UOyzVukOJ2GiIMLIS+oe+i4h+4CiXjhSVXArJDeETkzfAd67s012qObR5ly37BQxyUXWkNaoA/umQorqwDVpvfftFsj7SVSuCbAhYzN4WhrIq63NwYzESi3YwfZ83PXw/abonzy1/9POAm+QMW3ttHyjk6bzcTCRYfe4Nu2uihYF9xYMvTc2ncT0LsMKsA==\",\n                    \"SigningCertUrl\": \"https://sns.us-east-1.amazonaws.com/SimpleNotificationService-6aad65c2f9911b05cd53efda11f913f9.pem\",\n                    \"UnsubscribeUrl\": \"https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:245636212397:dlresult:b2a4ea7f-a427-41c3-8283-f27707aa0929\",\n                    \"MessageAttributes\": {}\n                }\n            }\n        ]\n    }\n\n- sms_service will read **prediction** and **filename** from there\n- It will send an SMS to the number stored in the environment variable **ivan_number** saying\n  \"There are only 6 people in the line\"\n\nImprovements for future sprints\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n- Create logic to filter when messages are sent to SMS\n- Send SMS to a list of subscribers as opposed to a single number\n" },
{ "alpha_fraction": 0.6957891583442688, "alphanum_fraction": 0.7063878774642944, "avg_line_length": 28.559322357177734, "blob_id": "eb03e3e7e6596c8b3b183f2b2fc6b091f0ad6921", "content_id": "0d0194c6bb550ac1994d52d5cafa58824bec59ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3491, "license_type": "no_license", "max_line_length": 254, "num_lines": 118, "path": "/scraper_service/README.rst", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "================\n Scraper Service\n================\n\nShake Shack provides real-time images of the store-front on the Shake Shack website. In order to count the number of people in line, we use these images as our starting point.\n\nThis scraper service downloads the images and saves them to S3. From S3, the images are used by downstream services. The service can be run on a schedule or *ad hoc*.\n\n\n\nPrerequisites\n=============\n\nThis service has three dependencies:\n\n1. AWS Lambda\n2. Python Libraries\n3. Amazon S3\n\n\n1. AWS Lambda\n-----------------\n\nThis is an Amazon product that allows you to create serverless applications. One can create an account and get started with Lambda here: https://aws.amazon.com/lambda/\n\n\n2. Python Libraries\n-------------------\n\nThis service depends on two python libraries:\n\n\na. requests\n~~~~~~~~~~~~~~~~~~~~~~\n\nThis library allows you to make HTTP requests with Python. This library is readily available with AWS Lambda. Simply include the following code in your Lambda function:\n`from botocore.vendored import requests`\n\n\nb. fake_useragent\n~~~~~~~~~~~~~~~~~~~~~~\n\nWe use this library in conjunction with the requests library because, when making repeated HTTP requests, we want to simulate using different browsers for each programmatic request.\n\nBecause this library is not readily available to AWS Lambda, we have to upload it to AWS. The process is as follows (`see here for detailed instructions <https://medium.com/@qtangs/creating-new-aws-lambda-layer-for-python-pandas-library-348b126e9f3e>`_):\n\nInstall the package locally by creating a `requirements.txt` file containing the following:\n\n.. code-block:: bash\n\n    fake-useragent==0.1.11\n\nCreate the following packaging script. When executed, this will install the packages in your requirements file:\n\n.. code-block:: bash\n\n    #!/bin/bash\n\n    export PKG_DIR=\"python\"\n\n    rm -rf ${PKG_DIR} && mkdir -p ${PKG_DIR}\n\n    docker run --rm -v $(pwd):/foo -w /foo lambci/lambda:build-python3.6 \\\n    pip install -r requirements.txt --no-deps -t ${PKG_DIR}\n\n\nExecute the packaging script to create a zip file:\n\n.. code-block:: bash\n\n    chmod +x get_layer_packages.sh\n    ./get_layer_packages.sh\n    zip -r my-Python36-Pandas23.zip .\n\n\nOnce you have created a zipfile with the fake_useragent library, upload it to your Lambda function with the UI.\n\n\n3. Amazon S3\n-----------------\n\nWe chose Amazon S3 to store scraped images. After setting up an S3 bucket, we needed to grant our scraper service \"full access\" to the S3 bucket. This enables the service to write to S3. Access to S3 can be granted through Amazon's IAM UI.\n\n\n\n\n\nRunning the Service\n====================\n\nThe scraper service does not require any inputs, so it can be triggered at any time. We trigger the service in one of two ways:\n\n\na. On a Schedule\n-----------------\n\nAWS Cloudwatch provides a mechanism to schedule the execution of AWS Lambda functions. We scheduled the service to run every 30 minutes.\n\n\nb. *Ad hoc*\n-----------------\n\nIn a subsequent iteration, we will use another service (check_line) to make an API call to trigger the scraper service.\n\n\n\n\nDeployment\n=============\n\nThe scraper service is manually deployed from GitHub (used for version control and managing the codebase). We will set up continuous integration in a subsequent version.\n\n\n\n\nBuilt With\n=============\n\n- `AWS Lambda <https://aws.amazon.com/lambda/>`_ - Serverless framework used\n- `S3 <https://aws.amazon.com/s3/getting-started/>`_ - File management\n\n\n\n" },
{ "alpha_fraction": 0.7260273694992065, "alphanum_fraction": 0.7260273694992065, "avg_line_length": 36.5, "blob_id": "e240d51ed3690cf58b407ed294f76805881d1069", "content_id": "93bfb81f327e7f1a072f8c9d15337d4c0812b151", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 73, "license_type": "no_license", "max_line_length": 60, "num_lines": 2, "path": "/train/README.rst", "repo_name": "philkim72/DeepShack", "src_encoding": "UTF-8", "text": "Multi-Scale Convolutional Neural Networks for Crowd Counting\n============================================================\n" } ]
13
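The DeepShack services above all hinge on the same SNS round-trip: the publisher wraps its payload in a 'default' envelope, and each subscriber gets it back as a JSON string under Sns.Message. A minimal sketch of both ends using boto3; the topic ARN and function names here are placeholders, not the repo's values:

# Sketch only: the pub/sub pattern used between predict and email/sms services.
import json
import boto3

def publish(prediction, filename):
    message = {'prediction': prediction, 'filename': filename}
    boto3.client('sns').publish(
        TopicArn='arn:aws:sns:us-east-1:123456789012:example-topic',  # placeholder
        Message=json.dumps({'default': json.dumps(message)}),
        MessageStructure='json')

def handler(event, context):
    # json.loads (not json.load): the message arrives as a string.
    message = json.loads(event['Records'][0]['Sns']['Message'])
    return message['prediction'], message['filename']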
L-D-Luffy/CV_Coursework
https://github.com/L-D-Luffy/CV_Coursework
017459c1e9f0aa8d2b6486b3fcde5517635df794
5cfac29a7d3ab7746fd9ff3c5bd7fcd6264d8dc1
5b535a6e4a47c87e24464a31a5a2c4152324c50f
refs/heads/master
2020-07-19T21:54:58.270199
2019-09-11T14:25:25
2019-09-11T14:25:25
206,520,663
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5209024548530579, "alphanum_fraction": 0.5723291039466858, "avg_line_length": 29.454545974731445, "blob_id": "d47df997935684067f2777fd3c0836347eb0730b", "content_id": "9b9c76982bcde66e1026ec14eba5f6ce83dceade", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3068, "license_type": "no_license", "max_line_length": 80, "num_lines": 99, "path": "/Day2_Bp/Bp.py", "repo_name": "L-D-Luffy/CV_Coursework", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport sklearn.datasets\nimport numpy as np\nimport sklearn.linear_model\nimport matplotlib\n\ndef initialize(input_num, layer1_num, layer2_num):\n model = dict()\n np.random.seed(12)\n w1 = np.random.randn(input_num, layer1_num)\n b1 = np.random.randn(1, layer1_num)\n w2 = np.random.randn(layer1_num, layer2_num)\n b2 = np.random.randn(1, layer2_num)\n model['w1'] = w1\n model['b1'] = b1\n model['w2'] = w2\n model['b2'] = b2\n return model\n\ndef predict(model, x):\n w1, b1, w2, b2 = model['w1'], model['b1'], model['w2'], model['b2']\n z1 = x.dot(w1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(w2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores/np.sum(exp_scores, axis=1, keepdims=True)\n return np.argmax(probs, axis=1)\n\ndef forward(model, x, y):\n epsilon = 0.043\n reg_lamda =0.0001\n x = x.reshape(1,2) #为什么要reshape?\n w1, b1, w2, b2 = model['w1'], model['b1'], model['w2'], model['b2']\n z1 = x.dot(w1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(w2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)#维度问题\n loss = -np.log(probs[0,y])\n\n #导数信息\n delta3 = probs\n delta3[0, y] -= 1#原来的代码时使用迭代器一起取数\n dw2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3, axis=0, keepdims=True)\n delta2 = delta3.dot(w2.T)*(1-np.power(a1,2))\n dw1 = np.dot(x.T, delta2)\n\n db1 = np.sum(delta2, axis=0)\n\n dw2 += reg_lamda*w2\n dw1 += reg_lamda*w1\n\n w1 += -epsilon*dw1\n b1 += -epsilon * db1\n w2 += -epsilon * dw2\n b2 += -epsilon * db2\n\n model['w1'] = w1\n model['b1'] = b1\n model['w2'] = w2\n model['b2'] = b2\n return loss, model\n\ndef plot_decision_boundary(pred_func, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n\ndef main():\n epochs = 200\n matplotlib.rcParams['figure.figsize'] = (10.0, 8.0)\n np.random.seed(3)\n X, Y = sklearn.datasets.make_moons(200, noise=0.20)\n print(len(X))\n model = initialize(2, 5, 2)\n for epoch in range(epochs):\n loss_ep = 0\n for i in range(len(X)):\n loss, model = forward(model, X[i], Y[i])\n loss_ep += loss\n loss_ep = loss_ep/len(X)\n if epoch%100==0:\n print('epoch:', epoch, 'loss:', loss_ep, '\\n')\n plot_decision_boundary(lambda x: predict(model, x), X, Y)\n plt.scatter(X[:, 0], X[:, 1], s=40, c=Y, cmap=plt.cm.Spectral)\n plt.show()\n\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.5449826717376709, "alphanum_fraction": 0.6228373646736145, "avg_line_length": 22.4489803314209, "blob_id": "c4be9a43bb9974155ea749b0f6a4fa3e8d4a5cc7", "content_id": 
"5d8e83a26e3681962a7186e63c7a51c9ffebcd8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1296, "license_type": "no_license", "max_line_length": 76, "num_lines": 49, "path": "/Day1_LinearClass/linearCla.py", "repo_name": "L-D-Luffy/CV_Coursework", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n#对每一个样本,更新一次W,并同时返回误差\ndef linear_pre(W,X1,Y):\n learn_rate = 0.16\n Result = X1.dot(W.T)\n if Result >0:\n Ypre = 1\n else:\n Ypre = 0\n Wnew = W + learn_rate*(Y-Ypre)*X1\n error = Y - Ypre\n return Wnew,error\n\nnp.random.seed(12)\n\nnum_observations = 500\n\nx1 = np.random.multivariate_normal([0,0],[[1,.75],[.75,1]],num_observations)\nx2 = np.random.multivariate_normal([1,4],[[1,.75],[.75,1]],num_observations)\n\nX = np.vstack((x1,x2)).astype(np.float32)\nY = np.hstack((np.zeros(num_observations),np.ones(num_observations)))\n\nX1 = np.hstack((X,np.ones([2*num_observations,1])))\nW = np.random.randn(1,3)\n\nerror_sum = 0\ngamma = 0.00001\n#这里想加上那个误差范围的判断,但一开始误差都是0,所以就设定让它在500次之后再生效了,\n#你们有好的写法就改一下\nfor i in range(len(X1)):\n W,error = linear_pre(W, X1[i], Y[i])\n error_sum += error\n if (error_sum/(i+1) < gamma)&(i>500):\n break\n\n\nf1=plt.figure(1)\n\nplt.scatter(X[0:500,0],X[0:500,1],c = 'red')\nplt.scatter(X[500:,0],X[500:,1],c ='green')\n\nx_line = np.linspace(-3,3,1000)\ny_line = -((W[0,0]*x_line+W[0,2])/W[0,1])\n\nplt.plot(x_line,y_line,color = 'black')\n\nplt.show()\n\n\n\n\n\n\n\n" } ]
2
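The forward() routine in Bp.py hand-derives the backprop gradients; a standard way to validate such derivations is a central-difference check. The sketch below is illustrative (the helper names loss_at and grad_check are not part of the repo) and compares only the data-loss gradients, i.e. the dw terms before the reg_lamda contribution and before the in-place parameter update:

# Sketch only: numerical gradient check for the 2-layer net above.
import numpy as np

def loss_at(model, x, y):
    """Forward pass only, mirroring forward() without the update step."""
    x = x.reshape(1, 2)
    z1 = x.dot(model['w1']) + model['b1']
    a1 = np.tanh(z1)
    z2 = a1.dot(model['w2']) + model['b2']
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return -np.log(probs[0, y])

def grad_check(model, x, y, name='w1', h=1e-5):
    """Central-difference estimate of d(loss)/d(model[name])."""
    w = model[name]
    num_grad = np.zeros_like(w)
    it = np.nditer(w, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        old = w[idx]
        w[idx] = old + h
        fp = loss_at(model, x, y)
        w[idx] = old - h
        fm = loss_at(model, x, y)
        w[idx] = old  # restore
        num_grad[idx] = (fp - fm) / (2 * h)
        it.iternext()
    return num_grad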
Yatheen07/django-labelDetection-GCP
https://github.com/Yatheen07/django-labelDetection-GCP
674b47e2a7308951f9a07a5c049c4dc5758f11ab
56a90a9f4433125f6543dc86479d7f0b6131e555
f6e9b51ad7777d5d41d66ccdf20733d4b832168b
refs/heads/master
2020-03-26T12:34:02.131385
2018-08-15T20:09:03
2018-08-15T20:09:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49242424964904785, "alphanum_fraction": 0.5, "avg_line_length": 16.53333282470703, "blob_id": "86c12d55e6a835f72e22454748362f984d1b4785", "content_id": "6012d8a25e6ad6ac2e0fbc1be000231d92a675c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 264, "license_type": "no_license", "max_line_length": 44, "num_lines": 15, "path": "/labelDetection/static/labelDetection/js/userdefinedScripts.js", "repo_name": "Yatheen07/django-labelDetection-GCP", "src_encoding": "UTF-8", "text": "$(document).ready(function(){\n\t\n\t$(\"#card1\").hide();\n\t\n $(\"#imageUpload\").on(\"click\",function(){\n\t\t$(\"#card1\").show();\n\t\t$.ajax({\n\t\t\turl : 'detectLabels/',\n\t\t\tmethod: 'POST',\n\t\t\tsuccess: function(data){\n\t\t\t\t$(\"#placeholder\").html(data);\n\t\t\t}\n\t\t});\n });\n});\n\n" }, { "alpha_fraction": 0.5784919857978821, "alphanum_fraction": 0.5791100263595581, "avg_line_length": 22.128570556640625, "blob_id": "3fbaa61bb617bdba5494eba43beef923c573729b", "content_id": "0813f9372ac622cfb2aa24747f47c23e6ac10d95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1618, "license_type": "no_license", "max_line_length": 134, "num_lines": 70, "path": "/labelDetection/static/labelDetection/js/interactive_form.js", "repo_name": "Yatheen07/django-labelDetection-GCP", "src_encoding": "UTF-8", "text": "$('.dataDesc').on(\"change keyup paste\",\n function(){\n if($(this).val()){\n $('.icon-paper-plane').addClass(\"next\");\n } else {\n $('.icon-paper-plane').removeClass(\"next\");\n }\n }\n);\n\n$('.next-button').hover(\n function(){\n $(this).css('cursor', 'pointer');\n }\n);\n\n$('.next-button.dataDesc').click(\n function(){\n console.log(\"Something\");\n $('.dataDesc-section').addClass(\"fold-up\");\n $('.attribute-section').removeClass(\"folded\");\n }\n);\n\n$('.attribute').on(\"change keyup paste\",\n function(){\n if($(this).val()){\n $('.icon-lock').addClass(\"next\");\n } else {\n $('.icon-lock').removeClass(\"next\");\n }\n }\n);\n\n$('.next-button').hover(\n function(){\n $(this).css('cursor', 'pointer');\n }\n);\n\n$('.next-button.attribute').click(\n function(){\n console.log(\"Something\");\n $('.attribute-section').addClass(\"fold-up\");\n $('.dependentVariable-section').removeClass(\"folded\");\n }\n);\n\n$('.dependentVariable').on(\"change keyup paste\",\n function(){\n if($(this).val()){\n $('.icon-repeat-lock').addClass(\"next\");\n } else {\n $('.icon-repeat-lock').removeClass(\"next\");\n }\n }\n);\n\n$('.next-button.dependentVariable').click(\n function(){\n console.log(\"Something\");\n $('.dependentVariable-section').addClass(\"fold-up\");\n\tvar dependentVariable=$(\"#dependentVariable\").val();\n\tvar dataDesc=$(\"#dataDesc\").val();\n\tvar attribute=$(\"#attribute\").val();\n\tvar result=\"The data is about \" + dataDesc +\" and the important attribute is \"+ attribute + \" with dependency on \"+dependentVariable;\n\talert(result);\t\n $('.success').css(\"marginTop\", 0);\n }\n);" }, { "alpha_fraction": 0.6356164216995239, "alphanum_fraction": 0.6767123341560364, "avg_line_length": 20.52941131591797, "blob_id": "54295c53f63be2dddfea11523e5edf5a9097560b", "content_id": "48ad54676233b807db8c6364568668fc3633f5d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 55, "num_lines": 17, "path": "/labelDetection/urls.py", 
"repo_name": "Yatheen07/django-labelDetection-GCP", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 15 19:04:39 2018\n\n@author: kmy07\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path,include\nfrom django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$',views.homepage),\n url(r'^upload/$', views.upload, name=\"upload\"),\n url(r'^upload/detectLabels/$',views.detect_labels),\n]" }, { "alpha_fraction": 0.7864077687263489, "alphanum_fraction": 0.7864077687263489, "avg_line_length": 19.600000381469727, "blob_id": "c46cd26de65426eb5c11534a44adda3a30328db4", "content_id": "b06fe5fec4c5681a8b546e66a88e917540aee435", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 103, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/labelDetection/apps.py", "repo_name": "Yatheen07/django-labelDetection-GCP", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass LabeldetectionConfig(AppConfig):\n name = 'labelDetection'\n" }, { "alpha_fraction": 0.6899999976158142, "alphanum_fraction": 0.6936842203140259, "avg_line_length": 32.92856979370117, "blob_id": "91cb5b157f689ec6298dc271148fb096dd40ecc7", "content_id": "25d00d1d8e3e2e4c716093db0b65d61294fecdac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1900, "license_type": "no_license", "max_line_length": 119, "num_lines": 56, "path": "/labelDetection/views.py", "repo_name": "Yatheen07/django-labelDetection-GCP", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n# Create your views here.\ndef homepage(request):\n return render(request,'labelDetection/index.html')\n\ndef upload(request):\n if request.method == 'POST':\n file = request.FILES['type'].file\n filename = str(request.FILES['type'])\n from PIL import Image\n image = Image.open(file)\n print(image)\n handle_uploaded_file(image,filename)\n print(\"Image Succesfully Stored\")\n return render(request,'labelDetection/index.html')\n \n return HttpResponse(\"Failed\")\n\ndef handle_uploaded_file(file,filename):\n filepath = r'E:\\\\Mini Projects\\\\insights!\\\\insights\\\\labelDetection\\\\static\\\\labelDetection\\\\img\\\\uploadedImages\\\\'\n import cv2\n import numpy as np\n fileName = filepath+filename.lower()\n #print(filename)\n image = np.array(file)\n cv2.imwrite(fileName,image)\n \n@csrf_exempt \ndef detect_labels(request):\n import os\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"E:\\Image Vision-b10cc37cecba.json\"\n from google.cloud import vision\n from google.cloud import storage\n from google.cloud.vision import types\n vision_client = vision.ImageAnnotatorClient()\n import io\n filepath = r'E:/Mini Projects/insights!/insights/labelDetection/static/labelDetection/img/uploadedImages/'\n filename = os.listdir(filepath)[0]\n filepath += filename\n print(\"Filepath is:\"+filepath)\n with io.open(filepath,'rb') as i:\n image = i.read()\n \n result = vision.types.Image(content=image)\n response = vision_client.label_detection(image=result)\n labels = response.label_annotations\n print('Labels:')\n final_result = \"The image may contain \"\n for label in labels:\n final_result+=\"<b>\"+label.description+\"</b>, \"\n \n return HttpResponse(final_result)\n" } ]
5
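The detect_labels view above writes the upload to disk and reads it back before calling the Vision API. A sketch of the same call without the disk round-trip, passing the uploaded bytes straight to the client; the view name detect_labels_inline is hypothetical and the field name 'type' mirrors the upload() view:

# Sketch only: label detection directly from the uploaded request bytes.
from django.http import HttpResponse
from google.cloud import vision

def detect_labels_inline(request):
    content = request.FILES['type'].read()        # raw image bytes
    client = vision.ImageAnnotatorClient()
    image = vision.types.Image(content=content)   # no temp file needed
    response = client.label_detection(image=image)
    labels = ", ".join(l.description for l in response.label_annotations)
    return HttpResponse("The image may contain: " + labels)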
RajendraJadi/python-scripts
https://github.com/RajendraJadi/python-scripts
42aee204f76dde1302f4d6a1c604edc1271800f7
69a6c91bc8063817663f95b0bff1e229ffe96b74
3bc06ab5084110d9215f5bfe7fa3a5589ab46ee3
refs/heads/master
2020-03-21T21:10:50.844449
2018-06-28T17:47:27
2018-06-28T17:47:27
139,050,194
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6370106935501099, "alphanum_fraction": 0.6427046060562134, "avg_line_length": 25.509803771972656, "blob_id": "b1054a485f129c0fceed1cf1d11f9ebb48af14db", "content_id": "c859ce4e4a70b5cade2eab26dcfb878a9a501a35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1405, "license_type": "no_license", "max_line_length": 86, "num_lines": 51, "path": "/google_storage_read_write.py", "repo_name": "RajendraJadi/python-scripts", "src_encoding": "UTF-8", "text": "from google.cloud import storage\r\nimport sys, os\r\nfrom oauth2client.client import GoogleCredentials\r\ncredentials = GoogleCredentials.get_application_default()\r\n\r\n\r\n\r\n# bucket = '/gs/my-bucket'\r\n\r\ndef writeText(bucketPath, contents):\r\n \"\"\"\r\n https://cloud.google.com/storage/docs/object-basics#storage-download-object-python\r\n \"\"\"\r\n client = storage.Client()\r\n bucket = client.get_bucket(\"solr-backups-dev-1\")\r\n # Create a new blob and upload the file's content.\r\n blob = bucket.blob(bucketPath)\r\n blob.upload_from_string(contents)\r\n\r\n\r\n\r\ndef list_blobs(bucketPath):\r\n print(\"storage client\")\r\n client = storage.Client()\r\n print(\"gettig bucket\")\r\n bucket = client.get_bucket('solr-backups-dev-1')\r\n # Create a new blob and upload the file's content.\r\n blobs = bucket.list_blobs()\r\n for blob in blobs:\r\n print(blob.name)\r\n\r\n\r\ndef write(bucketPath):\r\n client = storage.Client()\r\n print(\"gettig bucket\")\r\n bucket = client.get_bucket(\"solr-backups-dev-1\")\r\n print(\"got bucket\")\r\n blob = bucket.blob(\"test.gz\")\r\n print(\"blob\")\r\n # with open('backup', 'rb') as my_file:\r\n blob.upload_from_filename(filename='backup.gz')\r\n print(\"upload\")\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #write(\"solr-backups-dev-1\")\r\n list_blobs(\"solr-backups-dev-1\")\r\n #readText(\"solr-backups-dev-1\")\r\n # list_blobs(\"log-analysis-output-dev-1\")\r\n\r\n" } ]
1
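The commented-out readText call in the __main__ block above has no definition in the file; a hedged sketch of what it could look like with the same client API (only the name comes from the source, the body is an assumption):

# Sketch only: a plausible read counterpart for the write helpers above.
from google.cloud import storage

def readText(blobPath):
    client = storage.Client()
    bucket = client.get_bucket("solr-backups-dev-1")  # bucket name from the script
    blob = bucket.blob(blobPath)
    return blob.download_as_string()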
sourabhsinha396/Automatic-Mail-sender-with-Python
https://github.com/sourabhsinha396/Automatic-Mail-sender-with-Python
d01c6abaad2fa30cae6df266b261405519ae6f3a
2f942239613586c17fed2bfdf312e216f9417220
a4d1729613c5e3cb0575330334f738836cdc0460
refs/heads/master
2020-05-26T22:13:23.509746
2019-06-18T07:00:22
2019-06-18T07:00:22
188,395,176
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6735293865203857, "alphanum_fraction": 0.6772058606147766, "avg_line_length": 18.238805770874023, "blob_id": "11db8ef5e54ab076ee6dc672f667911606efec01", "content_id": "9f06b233f186ed2d07ba6aa1a2f156a031a5d9fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1360, "license_type": "no_license", "max_line_length": 72, "num_lines": 67, "path": "/Send_from_DataFrame.py", "repo_name": "sourabhsinha396/Automatic-Mail-sender-with-Python", "src_encoding": "UTF-8", "text": "\r\nimport pandas as pd\r\ndf=pd.read_excel('company.xlsx')\r\n\r\n\r\nlist_blank=[]\r\n\r\nfor row,index in df.iterrows():\r\n print(index)\r\n list_blank.append(str(index))\r\n #list_blank.append(index)\r\nprint(\"\\n\\n\\n\\n\")\r\n#print(list_blank) \r\n\r\n\r\nimport re \r\n \r\ns=\" \"\r\nfor item in list_blank:\r\n s=s+item\r\n \r\nprint(\"Thewhole string is\")\r\nprint(s,type(s))\r\n \r\n \r\nlist_of_emails = re.findall('\\S+@\\S+', s) \r\nprint(list_of_emails) \r\ntype(list_of_emails)\r\n \r\n\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nfrom email import encoders\r\n\r\n\r\n\r\nemail_user='[email protected]'\r\n# email_send='[email protected]'\r\nsubject=\"Almost done\"\r\n\r\n\r\nmsg=MIMEMultipart()\r\nmsg['From']=email_user\r\n#msg['To']=email_send\r\nmsg['Subject']=subject\r\n\r\nbody=\"This is being automatically sent\"\r\nmsg.attach(MIMEText(body,'plain'))\r\n\r\nfilename='love.jpg'\r\nattachment=open(filename,'rb')\r\npart=MIMEBase('application','octet-stream')\r\npart.set_payload((attachment).read())\r\nencoders.encode_base64(part)\r\npart.add_header('Content-Disposition',\"attachment; filename= \"+filename)\r\n\r\nmsg.attach(part)\r\ntext=msg.as_string()\r\nserver=smtplib.SMTP('smtp.gmail.com',587)\r\nserver.starttls()\r\nserver.login(email_user,\"DemoPass\")\r\n\r\nfor item in list_of_emails:\r\n server.sendmail(email_user,item,text)\r\n\r\nserver.quit()\r\n\r\n" }, { "alpha_fraction": 0.6959525942802429, "alphanum_fraction": 0.7127344608306885, "avg_line_length": 19.978260040283203, "blob_id": "e39da7657a8e1a24523c6fffa38506bbfdafd2cc", "content_id": "8ba4b8b413ad2fe8aefb6c29669a84e53254c3b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1013, "license_type": "no_license", "max_line_length": 72, "num_lines": 46, "path": "/E-Mail_project.py", "repo_name": "sourabhsinha396/Automatic-Mail-sender-with-Python", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 4 10:29:35 2019\r\n\r\n@author:Sourabh Sinha\r\n\"\"\"\r\n\r\n\r\nimport smtplib\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.base import MIMEBase\r\nfrom email import encoders\r\n\r\n\r\n\r\nemail_user='[email protected]'\r\nemail_send='[email protected]'\r\nsubject=\"Python sending mail with sub+attachment\"\r\n\r\n\r\nmsg=MIMEMultipart()\r\nmsg['From']=email_user\r\nmsg['To']=email_send\r\nmsg['Subject']=subject\r\n\r\nbody=\"Hi there Abhijeet ,Its me using python to send mail\"\r\nmsg.attach(MIMEText(body,'plain'))\r\n\r\nfilename='love.jpg'\r\nattachment=open(filename,'rb')\r\npart=MIMEBase('application','octet-stream')\r\npart.set_payload((attachment).read())\r\nencoders.encode_base64(part)\r\npart.add_header('Content-Disposition',\"attachment; filename= 
\"+filename)\r\n\r\nmsg.attach(part)\r\ntext=msg.as_string()\r\nserver=smtplib.SMTP('smtp.gmail.com',587)\r\nserver.starttls()\r\nserver.login(email_user,\"Demo_Password\")\r\n\r\n\r\n\r\nserver.sendmail(email_user,email_send,text)\r\nserver.quit()\r\n\r\n" }, { "alpha_fraction": 0.7763158082962036, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 51.61538314819336, "blob_id": "c2db32762b239778029d90c9c8d348ba2afbf013", "content_id": "304429225983fb54ab743f73f2453b54ab360c01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 684, "license_type": "no_license", "max_line_length": 119, "num_lines": 13, "path": "/README.md", "repo_name": "sourabhsinha396/Automatic-Mail-sender-with-Python", "src_encoding": "UTF-8", "text": "# Automatic-Mail-sender-with-Python\nSend Hundreds of mail with one click\n\nNOTE: Don't send to too many email ids otherwise google or your mail provider may block you.\n\n\n* The E-Mail_project.pyfile explains how to send an email from one email id to other '1' email id.\n* Next Send_From_DataFrame.py file is able to identify email-ids from a dataframe and send to all identifies email-ids.\n* The from_text_file.py file is able to identify email-ids from a dataframe and send to all identifies email-ids.\n\nJust convert any pdf,excel,word file to doc file and send mails to hundreds of people.\n\nNOTE: Don't send to too many email ids otherwise google or your mail provider may block you.\n" } ]
3
majapklm/sorting_programs
https://github.com/majapklm/sorting_programs
2069d21b7be872a61e2d710b0df57e295eb0e174
9881b1a8d0d1bf726934b61be321ff8b77cafccf
fe04d3be4ea648541d8b40fe614cb7e8ad499d91
refs/heads/master
2021-01-16T19:14:12.593372
2015-01-07T06:34:23
2015-01-07T06:34:23
28,901,872
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4345991611480713, "alphanum_fraction": 0.5358649492263794, "avg_line_length": 20.363636016845703, "blob_id": "b668caeb16861ac552402efaa313664d1c811b8a", "content_id": "dc76ea21b96aced4a0ba10d6f9b10a498a8b9907", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 237, "license_type": "no_license", "max_line_length": 43, "num_lines": 11, "path": "/bubble_sort.py", "repo_name": "majapklm/sorting_programs", "src_encoding": "UTF-8", "text": "a = [11,33,24,78,90,10,14,55,60]\ndef bubble_sort(a):\n\tfor pass_number in range(len(a) - 1,0,-1):\n\t\tfor i in range(pass_number):\n\t\t\tif a[i] > a[i+1]:\n\t\t\t\tnumber = a[i]\n\t\t\t\ta[i] = a[i+1]\n\t\t\t\ta[i+1] = number\t\n\t\t\t\nbubble_sort(a)\nprint (a)\n\n\n" } ]
1
rohits65/interactionDistances
https://github.com/rohits65/interactionDistances
b02b186418751ec97d479918e793c312fd4b788c
2d374ba24a6e50aeb587e1bf8a49957676a63037
6629a42ada6accb83d39e4cbf6f0674e37fb0783
refs/heads/main
2023-04-10T07:18:48.274499
2021-04-19T02:28:52
2021-04-19T02:28:52
359,305,126
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5679845809936523, "alphanum_fraction": 0.5766634345054626, "avg_line_length": 19.33333396911621, "blob_id": "bae733c4630939d43beecb2cdb251aaa38f93c65", "content_id": "2f62558d09a3480073300632eb5fd35b4bdb0ca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1037, "license_type": "no_license", "max_line_length": 64, "num_lines": 51, "path": "/distances.py", "repo_name": "rohits65/interactionDistances", "src_encoding": "UTF-8", "text": "from biopython.Bio.PDB import PDBParser\n\nimport numpy as np\nimport sys\n\nparser = PDBParser()\n\ndef getSmallestDists(model):\n \n\n smallestDists = []\n\n chitosanIds = []\n\n\n\n for chain in model:\n for residue in chain:\n if residue.resname == 'CTS':\n chitosanIds.append(chain.id)\n\n for r in model.get_residues():\n arr = []\n for id in chitosanIds:\n if r.resname == 'EMO':\n for atom in r:\n for chitosan in model[id]:\n for chitosanAtom in chitosan:\n arr.append(abs(atom - chitosanAtom))\n if len(arr) != 0:\n smallestDists.append(np.amin(arr))\n\n return smallestDists\n\n\nmodelName = sys.argv[2]\nmodelFile = sys.argv[1]\n\nprint(modelFile)\nprint(modelName)\n\nstructure = parser.get_structure(modelName, modelFile)\n\n\n\n#for model in structure:\nfor i in range(100, 151):\n arr = getSmallestDists(structure[i])\n for element in arr:\n print(element, end =' ')\n print()\n" }, { "alpha_fraction": 0.8409090638160706, "alphanum_fraction": 0.8409090638160706, "avg_line_length": 43, "blob_id": "3575746358d237996f61ad499c9e317a51a6285c", "content_id": "ad011968bdd2d213fa31619b16b142d0ec35017c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 88, "license_type": "no_license", "max_line_length": 64, "num_lines": 2, "path": "/README.md", "repo_name": "rohits65/interactionDistances", "src_encoding": "UTF-8", "text": "# interactionDistances\nFinds the smallest distances between two components in a system.\n" } ]
2
arenfuller/Learning
https://github.com/arenfuller/Learning
3edadce200e39c413ddba29244d109f43b34122e
40eac05ed7ff3ef7eb596234388607fe31b19de8
034e0e7a892fd5643ba17ffb26535cf6727efa53
refs/heads/master
2020-04-14T20:26:43.450339
2019-01-04T10:32:02
2019-01-04T10:32:02
164,093,861
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6767956018447876, "alphanum_fraction": 0.7127071619033813, "avg_line_length": 38.22222137451172, "blob_id": "efc0c0bdd7f997cab42b886e4c533bbb49ef9e91", "content_id": "caa2ed9e4eecb34e4c8f6c3b6a9d6caab2db98a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 62, "num_lines": 9, "path": "/afBonusCheck.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#Dictionary of comapny bonus schemes\r\nGradeBonus={'1':0.01, '2':0.02, '3':0.035}\r\n\r\n#Ask the user their annual salary and their grade\r\nSalary=float(input (\"Enter your annual salary £\"))\r\nGrade= input (\"Enter your grade \")\r\nBonus=Salary*GradeBonus[Grade]\r\nprint(\"your bonus this year is £\", Bonus)\r\nprint(\"Your annual salary including bonus is £\", Bonus+Salary)\r\n" }, { "alpha_fraction": 0.6507936716079712, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 22.600000381469727, "blob_id": "224549ffd803ea9ce1618871936b49de079eabff", "content_id": "1aaf6dcc2589cd6c9a5cf178417d0f47cbd87db2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 56, "num_lines": 20, "path": "/AF_MileageCalc.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#Engine size of 1.3L or larger are awarded 23p per mile\r\n#those unser 1.3L are awarded 15p per mile\r\n\r\nExpensiveMile=\"23p\"\r\nCheapMile=\"15p\"\r\n\r\n\r\n#UserInput for engine size\r\nEngineSize=float(input(\"Input your engine size\"))\r\n\r\n#User input for milage\r\nMilage=float(input(\"Enter the total mileage travelled\"))\r\n\r\nif EngineSize >= 1.3:\r\n AmountGranted1=Milage*23\r\n print(\"your claim is \" ,AmountGranted1)\r\n\r\nelse:\r\n AmountGranted2=Milage*15\r\n print(\"your claim is \", AmountGranted2)\r\n \r\n \r\n" }, { "alpha_fraction": 0.7103448510169983, "alphanum_fraction": 0.7213793396949768, "avg_line_length": 26.84000015258789, "blob_id": "bba2459d1db3c819140bb42e76261b04f24e997a", "content_id": "59c89f0c127ca66b88615d2f95466b2018236f6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 65, "num_lines": 25, "path": "/AfPaint.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "import math\r\n#Input dimensions of room\r\nlength=float(input(\"enter length of room\"))\r\nHeight=float(input (\"enter height of wall\"))\r\n#Input width of room\r\nWidth=float(input(\"Enter width of room\"))\r\n#Input Size of door\r\nWdoor=float(input (\"Enter width of door\"))\r\nHdoor=float(input(\"enter height of door\"))\r\n#Calculate area of door\r\nDarea=Wdoor*Hdoor\r\n#Calculate Area of Walls\r\nL1=length*Height\r\nW1=Width*Height\r\nArea=(L1*2)+(W1*2)-Darea\r\n#Calculate coverage of paint taking that one tin covers 3m square\r\nTins=Area/3\r\nFTins=round(Tins)\r\n\r\nCCan=float(input(\"enter price per can of paint\"))\r\nTCost=CCan*Tins\r\nmath.ceil(TCost)\r\n\r\nprint (f\"you will need {FTins} of tins to paint your walls\")\r\nprint (f\"It will cost £{TCost}\") \r\n\r\n" }, { "alpha_fraction": 0.7555555701255798, "alphanum_fraction": 0.7555555701255798, "avg_line_length": 31.75, "blob_id": "4e6b66a4ce48cbf42c794d972eb8b058663d33d1", "content_id": "5bcd2f9e483798a73eeac53d4e5888ac88915f19", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 44, "num_lines": 4, "path": "/spacestation.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "url='http://api.open-notify.org/astros.json\"\r\nresponse=urllib.request.urlopen(url)\r\nresult=json.loads(response.read())\r\nprint(result)\r\n" }, { "alpha_fraction": 0.6495575308799744, "alphanum_fraction": 0.6566371917724609, "avg_line_length": 22.565217971801758, "blob_id": "d0a271ef55e57f60196140606e642488bc12b78b", "content_id": "503fca1454d87e17a2f01c8836df80913571b760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 565, "license_type": "no_license", "max_line_length": 60, "num_lines": 23, "path": "/afcats.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#working with a list of strings\r\n\r\nmy_cats=[\"cameron\", \"Milo\", \"Elyshia\"]\r\n\r\nprint(\"cats in reverse order\", my_cats[::1])\r\nprint(\"number of cats\", len(my_cats))\r\nprint(\"sorted cats\", sorted(my_cats))\r\nprint(\"reverse sorted cats\", sorted (my_cats, reverse=True))\r\nprint (\"1st cat\", my_cats[0])\r\n\r\n#Add a new cat and see it\r\nmy_cats.append(\"Lex\")\r\nprint(\"cats\", my_cats)\r\n\r\n#remove a cat by name and see the change\r\nmy_cats.remove(\"cameron\")\r\nprint(\"cats\", my_cats)\r\n\r\n#remove cat by its position\r\ndel(my_cats[0])\r\nprint(\"cats\", my_cats)\r\n\r\nprint (\"Lex\" in my_cats)\r\n" }, { "alpha_fraction": 0.5024154782295227, "alphanum_fraction": 0.5797101259231567, "avg_line_length": 23.5, "blob_id": "d85cdad41723abd2f68339a9d087d2278bcc796c", "content_id": "3d1bef95b8bf2d003d271a53efab3e865abfc65c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "no_license", "max_line_length": 51, "num_lines": 8, "path": "/AF_validation loop.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "###adding a loop for validation\r\n\r\nage=0\r\n\r\nwhile age <18 or age > 120:\r\n age=int (input(\"enter your age(18-120):\"))\r\n if age <18 or age > 120:\r\n print (\"age is invalid, please try again.\") \r\n\r\n" }, { "alpha_fraction": 0.6782178282737732, "alphanum_fraction": 0.6782178282737732, "avg_line_length": 20.44444465637207, "blob_id": "1c0b7784737c8c0175738c08e29543dc763cc1a8", "content_id": "52f0e6697a3e39e5a9afc2eee29581b233ab3d76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 202, "license_type": "no_license", "max_line_length": 33, "num_lines": 9, "path": "/AF If Statements.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#simple if statement\r\nsecret = \"letmein\"\r\nusername=input(\"enter username:\")\r\npassword=input(\"enter password:\")\r\n\r\nif password == secret:\r\n print(\"Access Granted\")\r\nelse:\r\n print(\"Access Denied\")\r\n" }, { "alpha_fraction": 0.7085714340209961, "alphanum_fraction": 0.7314285635948181, "avg_line_length": 33, "blob_id": "c9f61070de80406b969e2f766f567759fb72e974", "content_id": "63fda98ed852ac14dc997db664d5b95d8cf6bdf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "no_license", "max_line_length": 59, "num_lines": 5, "path": "/AfTask1Day2.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "###More complex Task\r\n#to create a converter for temp from F into 
C\r\nTempInF=float(input(\"enter a temperature in Fahrenheit\"))\r\nTempInC=(TempInF-32)*(5/9)\r\nprint (TempInC)\r\n" }, { "alpha_fraction": 0.6595744490623474, "alphanum_fraction": 0.6638298034667969, "avg_line_length": 19.363636016845703, "blob_id": "1bef2ae25a88ecd78b526a830c10707a83e9c78d", "content_id": "74b066eb09ae09d2b4bafe07e74ea9173665f580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 55, "num_lines": 11, "path": "/afAgeCalc.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#Input User's name\r\nname=input(\"enter your name:\")\r\n\r\n#Input user's age\r\nage=int(input(name+\",what's your age?\"))\r\n\r\n#Calculate age next year\r\nage+=1\r\n\r\n#Print suitable message\r\nprint(name, \"In a year you will be\", age, \"years old.\")\r\n" }, { "alpha_fraction": 0.6658932566642761, "alphanum_fraction": 0.6937354803085327, "avg_line_length": 22.823530197143555, "blob_id": "d0389e8d5c6a31ebd6ced680fd9e3a9c8a28cba2", "content_id": "718b7682e3f99461346a52436b8174e8d66417bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 58, "num_lines": 17, "path": "/AFMileageSecondAttmpt.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "\r\n#working out the allowance\r\nEngineSize=float(input(\"Input your Engine Size\"))\r\n\r\n#Userinput for mileage\r\nMileage=float(input(\"Input the total distance travelled\"))\r\n\r\n#Calculation - check the largest engine band first, then fall back to the lower rates\r\n\r\nif EngineSize >=2.0:\r\n    AmountGranted2=Mileage*35\r\n    print (\"your claim is\", AmountGranted2, \"Pence\")\r\n\r\nelif EngineSize >=1.3:\r\n    AmountGranted1=Mileage*23\r\n    print (\"your claim is\", AmountGranted1, \"Pence\")\r\n\r\nelse:\r\n    AmountGranted3=Mileage*15\r\n    print (\"your claim is\", AmountGranted3, \"Pence\")\r\n    \r\n" }, { "alpha_fraction": 0.6253520846366882, "alphanum_fraction": 0.6478873491287231, "avg_line_length": 27.08333396911621, "blob_id": "fc05a1eb3ca5f1ac58b13237dcdaf9471ade99a5", "content_id": "1ac05d447afbe0366cee406d5f98b2b9caf82ea2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 49, "num_lines": 12, "path": "/Af_exceptions.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#runtime error handling\r\ntry: \r\n    num1=int(input(\"enter your 1st number:\"))\r\n    num2=int(input(\"enter your 2nd number:\"))\r\n    result=num1/num2\r\n    print(num1, \"divided by\", num2, \"is\", result)\r\n\r\nexcept ZeroDivisionError:\r\n    print(\"cannot divide by zero, try again.\")\r\n\r\nexcept ValueError:\r\n    print(\"Both inputs must be numbers, sorry. 
\")\r\n \r\n" }, { "alpha_fraction": 0.6925531625747681, "alphanum_fraction": 0.721276581287384, "avg_line_length": 40.54545593261719, "blob_id": "e23f0f1a98911c7954039a64d4a9bf8d3085a4f9", "content_id": "b4472eee89139182075a206058dbe701fd1fc679", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "no_license", "max_line_length": 123, "num_lines": 22, "path": "/Day2 warm up.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#slicing example\r\nmystring= \"Hello World\"\r\nprint(\"1st character\", mystring[0])\r\nprint(\"5th character\", mystring[4])\r\nprint(\"last character by position\", mystring[10])\r\nprint(\"always the last character\", mystring[-1])\r\nprint(\"first 3 characters\", mystring[:3])\r\nprint (\"a slice of the string\", mystring[2:6])\r\nprint (\"a slice of the string incrementally\", mystring[2:6:2]) #starts at 2 and goes to 6 in increments of 2 \r\nprint (\"string reveresed\", mystring[::-1]) \r\n#slicing can be used on other data containers, not just strings \r\n#1st character H\r\n#5th character o\r\n#last character by position d\r\n#always the last character d\r\n#first 3 characters Hel note that the [:3] means unspecified to three and it defaults to the first [0:3] could be done too\r\n#a slice of the string llo \r\n#a slice of the string incrementally lo\r\n#string reveresed dlroW olleH\r\n\r\n#challenge 2 I want the result drWoldH\r\nprint(\"Challenge Result\", mystring[-1::-2])\r\n\r\n\r\n" }, { "alpha_fraction": 0.668067216873169, "alphanum_fraction": 0.668067216873169, "avg_line_length": 21.75, "blob_id": "946f914cd2700f295dab7b3b86fe811d5d254de1", "content_id": "aaa11d8a6265b3c315f206edb2454b245909814d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 46, "num_lines": 20, "path": "/AfPythonVennDiag.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "project_users={'Ron','Jess', 'Frank', 'Phil'}\r\n\t \r\nadmin_users={'Frank', 'Jess', 'Jack'}\r\n\t \r\n#Number of Users\r\n\t \r\nprint(len(project_users))\r\n\r\n#All Users\r\n\t \r\nprint(project_users.union(admin_users))\r\n\r\n#Users who are in both groups\r\nprint(project_users.intersection(admin_users))\r\n\r\n#Project users who are not admin\r\nprint(project_users.difference(admin_users))\r\n\r\n#Admin users not working on the project\r\nprint (admin_users.difference(project_users)) \r\n" }, { "alpha_fraction": 0.5916359424591064, "alphanum_fraction": 0.6469864845275879, "avg_line_length": 20.58333396911621, "blob_id": "52bc093e7ee5e0ef3e730ec0e66a82917ccfae9d", "content_id": "4573219301bd30664382453ccb434e7a8c359cae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 813, "license_type": "no_license", "max_line_length": 82, "num_lines": 36, "path": "/Af_Gradeing.py", "repo_name": "arenfuller/Learning", "src_encoding": "UTF-8", "text": "#Exam Marks input\r\n#for this to work they must be outside the validation rules e.g. 
not between 0-100\r\nRawMark1=-1\r\nRawMark2=-1\r\n\r\nwhile RawMark1 <0 or RawMark1 >100:\r\n    RawMark1=int(input(\"Enter delegates raw score for paper 1:\"))\r\n    \r\n    if RawMark1 <0 or RawMark1 >100:\r\n        print (\"Score is invalid, check and try again\")\r\n    \r\nwhile RawMark2 <0 or RawMark2 >100:\r\n    \r\n    RawMark2=int(input(\"Enter delegates raw score for paper 2:\"))\r\n    if RawMark2 <0 or RawMark2 >100:\r\n        print (\"Score is invalid, check and try again\") \r\n\r\n\r\n#Average of grades\r\nAverageGrade=float(RawMark1+RawMark2)/2\r\n\r\n\r\n#Assigning Grades\r\nif AverageGrade <40:\r\n    print (\"Fail\")\r\n\r\n\r\nelif AverageGrade <65:\r\n    print (\"Pass\")\r\n\r\n\r\nelif AverageGrade >85:\r\n    print (\"Distinction\")\r\n\r\nelse:\r\n    print (\"Merit\")\r\n" } ]
14
rigas-IC/2D_LANGEVIN_RL_CONTROL
https://github.com/rigas-IC/2D_LANGEVIN_RL_CONTROL
c6956c3f01914116e141e152df212e8fa8e8b119
3c8e09dcc517138d043f9e01ce029ae2332174ef
bda38ca3b9478dcfd2d6685dd54a4b5d0e0f82fa
refs/heads/master
2022-11-05T22:21:20.631807
2020-06-19T16:51:12
2020-06-19T16:51:12
274,149,944
0
0
null
2020-06-22T13:49:38
2020-06-19T16:51:19
2020-06-19T16:51:17
null
[ { "alpha_fraction": 0.6261593699455261, "alphanum_fraction": 0.6691274046897888, "avg_line_length": 35.184932708740234, "blob_id": "796a6d474485568d573c6d23a37dc12f2add415e", "content_id": "c1af32bf9fe2a35620fb32af5946d7a92ff389ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5283, "license_type": "no_license", "max_line_length": 200, "num_lines": 146, "path": "/EVAL_ENV.py", "repo_name": "rigas-IC/2D_LANGEVIN_RL_CONTROL", "src_encoding": "UTF-8", "text": "from LANGEVIN2D_ENV import Langevin2D_Env\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n# Environment Parameters\nenv_params = {\n \"dt\": 0.0005,\n \"T\" : 100.0,\n \"a\" : 1.0 +1.0j,\n \"b\" : -5.0e2,\n \"D\" : 0.0e-4,\n \"x0\": 0.04472135955 + 0.0j\n }\n\n# Path to save the figure\n#fig_path = 'figures/FeedbackControl_Kp5_mag_1_D_0_gr_10_wn_10.png'\n#fig_path = 'figures/NoControl_D_1em4_gr_10_wn_10.png'\nfig_path = None\n\n\n# Create instance of complex Stuart-Landau equation environment\nenvironment = Langevin2D_Env()\nenvironment.env_params = env_params\n\n# Initiate environment to initial state\ntime = np.zeros((environment.max_episode_timesteps()))\nstates = np.zeros((environment.max_episode_timesteps(),2))\nactions = np.zeros((environment.max_episode_timesteps(),2))\nstate = environment.reset()\nstates[0,:] = state[\"observation\"]\n\n# Episode reward - defined as magnitude of the complex state\nsum_rewards = 0.0\n\n# Set up control time with reference to simulation time\ndt = environment.env_params[\"dt\"]\ndt_action = 0.05\nT = environment.env_params[\"T\"]\nn_env_steps = int(dt_action / dt)\nn_actions = int(T/dt/n_env_steps)\n\n# Proportional gain - If using feedback control\nKp_r = 0.0\nKp_i = 0.0\nmax_forcing_mag = 1.0\n\nobservation = states[0,:]\n\n# March system for specified number of timesteps\n\nfor ii in range(0,n_actions):\n\n p_control = np.array([-Kp_r*observation[0] , -Kp_i*observation[1]])\n \n for jj in range(0,n_env_steps):\n actions[jj + ii*n_env_steps,:] = np.clip(p_control, -max_forcing_mag, max_forcing_mag)\n state, terminal, reward = environment.execute(actions= p_control)\n observation = state[\"observation\"]\n states[jj + ii*n_env_steps,:] = observation\n time[jj + ii*n_env_steps] = environment.time\n sum_rewards += reward\n\n# Compute and output episode metrics\nprint('Episode cumulative reward: {} - Average reward: {}'.format(sum_rewards, sum_rewards/environment.max_episode_timesteps()))\n\nfig = plt.figure(figsize=(16,9))\nfig.tight_layout()\n\nif (Kp_i == 0 and Kp_r == 0):\n fig.suptitle('No Control - Episode cumulative reward: {} - Average reward: {}'.format(sum_rewards, sum_rewards/environment.max_episode_timesteps()))\nelse:\n fig.suptitle('Proportional Feedback Control Kp_r={}, Kp_i={} - Episode cumulative reward: {} - Average reward: {}'.format(Kp_r, Kp_i, sum_rewards, sum_rewards/environment.max_episode_timesteps()))\n\nplt.subplots_adjust(top=0.925, bottom=0.05, right=0.95, left=0.05, hspace=0.5)\n\n# 2D Histogram (PDF) of the state\nnbins = 200\nN_2D, x_edges, y_edges = np.histogram2d(states[:,0],states[:,1], np.array([nbins,2*nbins]))\nPDF_2D = N_2D / environment.max_episode_timesteps() / (x_edges[1]-x_edges[0]) / (y_edges[1]-y_edges[0])\n\n# Plot 2D PDF as pcolormesh\nX,Y = np.meshgrid(x_edges, y_edges)\nax0 = plt.subplot2grid(shape=(4,2), loc=(0,0), rowspan=2, colspan= 1)\nim = ax0.pcolormesh(X, Y, PDF_2D.T, cmap= plt.get_cmap('hot_r'))\nfig.colorbar(im, ax = ax0)\nax0.set_title('2D PDF 
of the system states')\nax0.set_xlabel('Re(x)')\nax0.set_ylabel('Im(x)')\n\n# 1D PDF\nN_1D_re , x_edges_1D_re = np.histogram(states[:,0],bins = nbins)\nPDF_1D_re = N_1D_re / environment.max_episode_timesteps() / (x_edges_1D_re[1] - x_edges_1D_re[0])\n\nN_1D_im , x_edges_1D_im = np.histogram(states[:,1],bins = nbins)\nPDF_1D_im = N_1D_im / environment.max_episode_timesteps() / (x_edges_1D_im[1] - x_edges_1D_im[0])\n# Plot 1D PDF\nax1 = plt.subplot2grid(shape=(4,2), loc=(2,0), rowspan=1, colspan= 1)\nax1.plot(x_edges_1D_re[:-1], PDF_1D_re)\nax1.set_title('1D PDF of the real component of the state')\nax1.set_xlabel('Re(x)')\nax1.set_ylabel('P(Re(x))')\n\nax2 = plt.subplot2grid(shape=(4,2), loc=(3,0), rowspan=1, colspan= 1)\nax2.plot(x_edges_1D_im[:-1], PDF_1D_im)\nax2.set_title('1D PDF of the imaginary component of the state')\nax2.set_xlabel('Im(x)')\nax2.set_ylabel('P(Im(x))')\n\n# Estimate power spectral density using Welch method\nn_window = int(environment.max_episode_timesteps()/10)\nFs = 1/environment.env_params[\"dt\"]\nwindow = signal.get_window('hann', n_window)\nf_re , PSD_re = signal.welch(states[:,0], fs= Fs, window= window, noverlap= 0.5*n_window, nfft= n_window)\nf_im , PSD_im = signal.welch(states[:,1], fs= Fs, window= window, noverlap= 0.5*n_window, nfft= n_window)\n# Plot PSD\nax3 = plt.subplot2grid(shape=(4,2), loc=(0,1), rowspan=2, colspan= 1)\nax3.loglog(f_re, PSD_re)\nax3.loglog(f_im, PSD_im)\nax3.set_title('Power Spectral Density of the state')\nax3.set_xlabel('f [Hz]')\nax3.set_ylabel('S(x)')\nax3.legend(('Real', 'Imaginary'))\n\n# Plot trajectory of system\nax4 = plt.subplot2grid(shape=(4,2), loc=(2,1), rowspan=1, colspan= 1)\nax4.plot(time, states[:,0])\nax4.plot(time, states[:,1])\nax4.set_title('Trajectory of the state')\nax4.set_xlabel('t')\nax4.set_ylabel('x')\nax4.legend(('Real', 'Imaginary'))\n\n# Plot control input to the system\nax5 = plt.subplot2grid(shape=(4,2), loc=(3,1), rowspan=1, colspan= 1)\nax5.plot(time, actions[:,0])\nax5.plot(time, actions[:,1])\nax5.set_title('Control input to the system - Controller time rate: {}'.format(dt_action))\nax5.set_xlabel('t')\nax5.set_ylabel('u')\nax5.legend(('Real', 'Imaginary'))\n\nif fig_path is not None:\n fig.savefig(fig_path)\nplt.show()\n" }, { "alpha_fraction": 0.6388521194458008, "alphanum_fraction": 0.6618101596832275, "avg_line_length": 26.621952056884766, "blob_id": "14caab06669b1de11bf0ea2606bf59a1ab181f94", "content_id": "b6aaeffa28b66ddc4c81579c4172d4bdb35edcb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2265, "license_type": "no_license", "max_line_length": 221, "num_lines": 82, "path": "/README.md", "repo_name": "rigas-IC/2D_LANGEVIN_RL_CONTROL", "src_encoding": "UTF-8", "text": "# 2D_LANGEVIN_RL_CONTROL\nReinforcement Learning based control of the stochastic Stuart-Landau equation\n\n# Installation\n[TensorForce](https://github.com/tensorforce) version 0.5.4 is required to run the environment and train the RL agent.\n\n# Training an Agent\nIn TRAIN_AGENT.py, define:\n\n- the directory for the TensorForce model checkpointing:\n\n```python\n# Saver directory\ndirectory = os.path.join(os.getcwd(), 'agents' ,'saver_data_model_name')\n```\n\n- the environment parameters for the Stuart-Landau system:\n``` python\n# Environment Parameters\nenv_params = {\n \"dt\": 0.0005,\n \"T\" : 100.0,\n \"a\" : 1.0 + 1.0j,\n \"b\" : -5.0e2,\n \"D\" : 0.0e-4,\n \"x0\": 0.03 + 0.0j\n }\n\n# Controller Parameters\noptimization_params = {\n 
\"min_value_forcing\": -1.0,\n \"max_value_forcing\": 1.0\n }\n \n# Training Parameters\ntraining_params = {\n \"num_episodes\" : 300,\n \"dt_action\" : 0.05\n}\n```\n \n- the actor and critic Neural Networks as a list of layer lists (refer to the [TensorForce](https://tensorforce.readthedocs.io/en/latest/index.html) documentation). The following example integrates dense and LSTM layers: \n```python\nnetwork = [\n [ \n dict(type='retrieve', tensors='observation'),\n dict(type='dense', size=16),\n dict(type='register' , tensor ='intermed-1')\n ],\n [ \n dict(type='retrieve', tensors='prev_action'),\n dict(type='dense', size=16),\n dict(type='register' , tensor ='intermed-2')\n ],\n [\n dict(type='retrieve', tensors=['intermed-1','intermed-2'], aggregation='concat'),\n dict(type='internal_lstm', size=32, length=1, bias=True),\n dict(type='dense', size=16),\n ]\n]\n```\n\n- if required, the additional Agent and runner parameters\n\n- execute the script and training will execute.\n\n# Evaluating an agent\nIn EVAL_ENV_RL, define:\n- the environment parameters, in a similar way as shown above\n- the path and filename of the figure to be saved. Define 'figpath=None' if figure is not to be saved:\n```python\n# Path to save the figure\nfig_path = 'figures/RLControl_Run_Description.png'\n```\n\n- the path to the TensorForce model saver:\n\n```python\n# Saver directory\ndirectory = os.path.join(os.getcwd(), 'agents' ,'saver_data_model_name')\n```\n- execute the script.\n" }, { "alpha_fraction": 0.5649644732475281, "alphanum_fraction": 0.5832794904708862, "avg_line_length": 30.154361724853516, "blob_id": "8a2cc5aea563c5be14ee68e316eb287e1d30386d", "content_id": "4969ae1fe37820554efa4ef7444cc6c53e5fdf61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4641, "license_type": "no_license", "max_line_length": 124, "num_lines": 149, "path": "/TRAIN_AGENT.py", "repo_name": "rigas-IC/2D_LANGEVIN_RL_CONTROL", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport numpy as np\nimport json\n\nfrom tensorforce.agents import Agent\nfrom tensorforce.environments import Environment\nfrom tensorforce.execution import Runner\n\nfrom LANGEVIN2D_ENV import Langevin2D_Env\n\n###############################################################################\n# PARAMETERS\n###############################################################################\n# Parallel\nnum_env = 8\n\n# Saver directory\ndirectory = os.path.join(os.getcwd(), 'agents' ,'saver_data_D_0_dta_0p05_maxa_1_ep100_lstm2_32_dense_64_gr_1_wn_1_r_ma1em2')\n\n# Environment Parameters\nenv_params = {\n \"dt\": 0.0005,\n \"T\" : 100.0,\n \"a\" : 1.0 + 1.0j,\n \"b\" : -5.0e2,\n \"D\" : 0.0e-4,\n \"x0\": None\n }\n\n# Controller Parameters\noptimization_params = {\n \"min_value_forcing\": -1.0,\n \"max_value_forcing\": 1.0\n }\n\n# Training Parameters\ntraining_params = {\n \"num_episodes\" : 400,\n \"dt_action\" : 0.05\n}\n\n# Compute environment and action input timesteps\nn_env_steps = int(training_params[\"dt_action\"] / env_params[\"dt\"])\nmax_episode_timesteps = int(env_params[\"T\"]/env_params[\"dt\"]/n_env_steps)\n\n# Create and instance of the complex Stuart-Landau environment\nenvironment = Langevin2D_Env(n_env_steps = n_env_steps)\nenvironment.env_params = env_params\nenvironment.optimization_params = optimization_params\n\nenvironments = []\n\nfor env in range(num_env):\n environments.append(Langevin2D_Env(n_env_steps = n_env_steps))\n environments[env].env_params = env_params\n 
environments[env].optimization_params = optimization_params\n\n###############################################################################\n# ACTOR/CRITIC NETWORK DEFINITIONS\n###############################################################################\n\n# Specify network architecture\n# DENSE LAYERS\n# actor_network = [ \n#     dict(type='retrieve', tensors='observation'),\n#     dict(type='dense', size=2),\n#     ]\n\n# LSTM\nactor_network = [\n    [ \n        dict(type='retrieve', tensors='observation'),\n        dict(type='internal_lstm', size=32, length=2, bias=False),\n        dict(type='register' , tensor ='intermed-1')\n    ],\n    [ \n        dict(type='retrieve', tensors='prev_action'),\n        dict(type='internal_lstm', size=32, length=2, bias=False),\n        dict(type='register' , tensor ='intermed-2')\n    ],\n    [\n        dict(type='retrieve', tensors=['intermed-1','intermed-2'], aggregation='concat'),\n        dict(type='dense', size=64),\n    ]\n]\n\ncritic_network = actor_network\n\n###############################################################################\n# AGENT DEFINITION\n###############################################################################\n\n# Specify the agent parameters - PPO algorithm\nagent = Agent.create(\n    # Agent + Environment\n    agent='ppo',  # Agent specification\n    environment=environment,  # Environment object\n    exploration=0.0,\n    # Network\n    network=actor_network,  # Policy NN specification\n    # Optimization\n    batch_size=num_env,  # Number of episodes per update batch\n    learning_rate=1e-4,  # Optimizer learning rate\n    subsampling_fraction=0.33,  # Fraction of batch timesteps to subsample\n    optimization_steps=25,\n    # Reward estimation\n    likelihood_ratio_clipping=0.2,  # The epsilon of the ppo clip objective\n    estimate_terminal=False,  # Whether to estimate the value of terminal states\n    # TODO: gae_lambda=0.97 doesn't currently exist - ???\n    # Critic\n    critic_network=critic_network,  # Critic NN specification\n    critic_optimizer=dict(\n        type='multi_step', num_steps=5,\n        optimizer=dict(type='adam', learning_rate=1e-4)\n    ),\n    # Regularization\n    entropy_regularization=0.01,  # To discourage policy from being too 'certain'\n    # Parallel\n    parallel_interactions=num_env,\n    # TensorFlow\n    saver=dict(directory=directory, filename=\"agent\"),  # TensorFlow saver configuration for periodic implicit saving\n    # TensorBoard Summarizer\n    #summarizer=dict(directory=os.path.join(directory, 'summarizer') , labels=\"all\")\n)\n\n###############################################################################\n# TRAINING\n###############################################################################\n\n# Runner definition - parallel runner using multiprocessing environments\nrunner = Runner(\n    environments=environments,\n    agent=agent,\n    remote=\"multiprocessing\",\n    max_episode_timesteps=max_episode_timesteps,\n    #evaluation=True\n)\n\n# Proceed to training\nrunner.run(\n    num_episodes=training_params[\"num_episodes\"],\n    sync_episodes=True,\n    #save_best_agent=os.path.join(os.getcwd(), 'best_agent')\n)\n\nagent.save()\n\nrunner.close()" }, { "alpha_fraction": 0.5448687076568604, "alphanum_fraction": 0.561785101890564, "avg_line_length": 36.85365676879883, "blob_id": "f7fe7a2388411d98c9ccaa51c512d2790a8804bf", "content_id": "b4278d4acf7b3098577393939b54d08d65a382ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6217, "license_type": "no_license", "max_line_length": 143, "num_lines": 164, "path": "/LANGEVIN2D_ENV.py", "repo_name": "rigas-IC/2D_LANGEVIN_RL_CONTROL", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom 
tensorforce.environments import Environment\n\nclass Langevin2D_Env(Environment):\n\n    '''\n    Defines the parameters for the 2D Langevin Stuart Landau equation\n    dt (float) - time step for dynamics evolution - default: 0.0005\n    T (float) - total simulation time - default: 100.0\n    a (complex float) - Re(a): growth rate of the dynamics - Im(a): angular frequency at equilibrium - default: 1.0 +1.0j\n    b (complex float) - Re(b): saturation term - Im(b): strength of the non-linear coupling between amplitude and frequency - default: -5.0e2\n    D (float) - Diffusion coefficient associated with Gaussian White Noise forcing - default: 1.0e-1\n    x0 (Complex float) - Initial position of the system - default: 0.03 + 0.0j\n    '''\n    env_params = {\n        \"dt\": 0.0005,\n        \"T\" : 100.0,\n        \"a\" : 1.0 +1.0j,\n        \"b\" : -5.0e2,\n        \"D\" : 1.0e-1,\n        \"x0\": 0.03 + 0.0j\n        }\n    \n    optimization_params = {\n        \"min_value_forcing\": -1.0,\n        \"max_value_forcing\": 1.0\n        }\n\n\n    def __init__(self, n_env_steps = 1):\n        super().__init__()\n        self.state = 0.0 + 0.0j # Internal state of the system\n        self.time = 0.0 # Internal time of the system\n        self.n = 0 # Step number\n        self.N = int(self.env_params[\"T\"] / self.env_params[\"dt\"]) # Maximum number of steps to take\n        self.n_env_steps = n_env_steps # Number of environment steps to march the system between actions\n        print(self.N)\n\n    def states(self):\n        '''\n        Returns the state space specification.\n        :return: dictionary of state descriptions with the following attributes:\n            type (\"bool\" / \"int\" / \"float\") – state data type (required)\n            shape (int > 0 / iter[int > 0]) – state shape (default: scalar)\n        '''\n        return dict(observation = dict(type='float', shape=(2,)), prev_action = dict(type='float', shape=(2,)))\n    \n    def actions(self):\n        '''\n        Returns the action space specification.\n        :return: dictionary of action descriptions with the following attributes:\n            type (\"bool\" / \"int\" / \"float\") – action data type (required)\n            shape (int > 0 / iter[int > 0]) – action shape\n            min_value/max_value (float) – minimum/maximum action value\n        '''\n        return dict(type='float', shape=(2,),\n                    min_value=self.optimization_params[\"min_value_forcing\"],\n                    max_value=self.optimization_params[\"max_value_forcing\"])\n\n    def reset(self):\n        \"\"\"\n        Reset environment and setup for new episode.\n        Returns:\n            initial state of reset environment.\n        \"\"\"\n        # Reset simulation time\n        self.time = 0.0\n        self.n = 0\n\n        # Reset environment to initial position\n        if self.env_params[\"x0\"] is not None:\n            self.state = self.env_params[\"x0\"]\n        else:\n            # Initial position on limit-cycle\n            eq = np.sqrt(-np.real(self.env_params[\"a\"])/np.real(self.env_params[\"b\"]))\n            self.state = eq*np.exp(np.random.normal(scale= 0.5*np.pi)*1j)\n        \n        print(self.state)\n        \n        self.N = int(self.env_params[\"T\"] / self.env_params[\"dt\"]) # Maximum number of steps to take\n        \n        next_state = dict(\n            observation = np.array([np.real(self.state),np.imag(self.state)]).flatten(),\n            prev_action = np.zeros((2,))\n        )\n\n        return(next_state)\n    \n    def execute(self, actions = np.array([0.0,0.0])):\n        '''\n        Run solver for one action step, until next RL env state (this means to run for number_steps_execution)\n        :param: actions\n        :return: next state (state value at end of action step)\n                 terminal\n                 reward (magnitude of the state)\n        '''\n\n        action = actions[0] + actions[1]*1j\n        \n        # Parameters of the system\n        a = self.env_params[\"a\"]\n        b = self.env_params[\"b\"]\n        D = self.env_params[\"D\"]\n\n        # Solver parameters\n        dt = self.env_params[\"dt\"]\n\n        # 
Gaussian White Noise forcing\n sigma = np.sqrt(2*D) # STD of stochastic forcing\n \n # March the system using Euler-Maruyama method for discretization\n # The system will evolve by n_env_steps steps between control input\n cum_reward = 0.0\n\n for _ in range(self.n_env_steps):\n An = np.random.normal(0.0,sigma) + np.random.normal(0.0,sigma)*1j\n\n # Deterministic component of system: complex Stuart-Landau equation\n SL_deterministic = a * self.state + b * self.state * np.square(np.abs(self.state))\n \n self.time = self.time + dt\n self.state = self.state + SL_deterministic * dt + action * dt + An * np.sqrt(dt)\n\n self.n += 1\n cum_reward -= np.abs(self.state)\n\n # Extract Real and Imaginary part of state as two separate states\n # Ensure reshape to size (2,)\n next_state = dict(\n observation = np.array([np.real(self.state),np.imag(self.state)]).reshape(2,),\n prev_action = actions\n )\n\n terminal = False\n\n # Reward based on magnitude of the state\n #reward = -np.abs(self.state)\n \n # Reward based on average magnitude of the state\n #reward =cum_reward / self.n_env_steps\n\n # Reward based on average magnitude of the state and action input penalization\n reward = cum_reward / self.n_env_steps - 1e-2*(np.abs(action) / self.optimization_params[\"max_value_forcing\"])\n\n # Print completion status to console\n if (self.n % (self.N/20) == 0):\n print(self.n)\n\n return (next_state, terminal, reward)\n\n def max_episode_timesteps(self):\n \n N = int(self.env_params[\"T\"] / self.env_params[\"dt\"])\n return N\n\n\nif __name__ == \"__main__\": \n env = Langevin2D_Env()\n\n next_state = env.reset()\n print(next_state)\n\n next_state, terminal, reward = env.execute()\n print(next_state, terminal, reward)" } ]
4
aidotse/Sentiment-web-app
https://github.com/aidotse/Sentiment-web-app
3f75d46f66a618823000d4722b16c5065205a770
fc49aecbc9b62afba6e0b38fd386802d94ab6115
7ad882e3b4a9ebd6d34d8b475c4d74d68c8ebb4b
refs/heads/master
2023-05-11T15:41:39.536444
2021-05-19T09:04:24
2021-05-19T09:04:24
351,106,038
1
0
null
2021-03-24T14:16:48
2021-05-28T09:57:09
2021-05-31T07:02:22
Python
[ { "alpha_fraction": 0.6541353464126587, "alphanum_fraction": 0.66595059633255, "avg_line_length": 33.407405853271484, "blob_id": "be8ea53d9df440d319e68e4276e4d0ce6c5bf565", "content_id": "6fd2b1249b833b8db232e9fee45f72b0a43889f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/example_request.py", "repo_name": "aidotse/Sentiment-web-app", "src_encoding": "UTF-8", "text": "import requests\n\nurlapi='http://localhost:3130/api'\nurlping='http://localhost:3130/ping'\n\n# file_path = 'path/to/your/data.csv'\nfile_path = \"upload/app-test-data.csv\"\nsetup_path = \"setup.json\"\n\ntry:\n ping = requests.get(urlping).json()['Status']\n print(ping)\nexcept:\n print(f\"Inference servers is down, please try again in a bit\")\n\ntry:\n if setup_path and file_path:\n files = {'eval_file': open(file_path,'r',encoding=\"utf-8\"),'setup': open(setup_path,'r',encoding=\"utf-8\")}\n print(f\"Starting inference request\")\n r = requests.post(urlapi, files=files)\n elif setup_path and not file_path:\n setup = {'setup': open(setup_path,'r',encoding=\"utf-8\")}\n print(f\"Starting inference request\")\n r = requests.post(urlapi, files = setup)\n print(r.json())\nexcept FileNotFoundError:\n print(f\"File could not be found, please check the specified file and setup path and filename\")\n\n\n" }, { "alpha_fraction": 0.40458014607429504, "alphanum_fraction": 0.5954198241233826, "avg_line_length": 15.5, "blob_id": "d0deaef9f8b66cade51a426275fcdb7cffa4b720", "content_id": "88d55dd41a79c4a5bfdd629b00cf1b3d947343e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 131, "license_type": "no_license", "max_line_length": 21, "num_lines": 8, "path": "/requirements.txt", "repo_name": "aidotse/Sentiment-web-app", "src_encoding": "UTF-8", "text": "flask >= 1.1.2\npandas >= 1.2.3\ntransformers >= 4.5.1\ntorch >= 1.8.0\ntqdm >= 4.49.0\nnumpy >=1.18\nwerkzeug >= 1.0.1\nwatchdog >= 2.0.2" }, { "alpha_fraction": 0.6262909173965454, "alphanum_fraction": 0.6329300403594971, "avg_line_length": 39.22255325317383, "blob_id": "391d43d3d42b4432634225ea3a90e3a792a622b0", "content_id": "51d3d689784a6fefe8bfbd9937ab3cc8eb44164b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13556, "license_type": "no_license", "max_line_length": 240, "num_lines": 337, "path": "/app.py", "repo_name": "aidotse/Sentiment-web-app", "src_encoding": "UTF-8", "text": "import os\nimport socket\nimport json\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom flask import Flask, render_template, request, jsonify\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom transformers import BertForSequenceClassification, BertTokenizerFast, BertForTokenClassification\nfrom werkzeug.utils import secure_filename\n\n\ndef next_free_port(port=3130, max_port=65535):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n while port <= max_port:\n try:\n sock.bind(('', port))\n sock.close()\n return port\n except OSError:\n port += 1\n raise IOError('no free ports')\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\nclass Dataset(torch.utils.data.Dataset):\n    def __init__(self, encodings, labels):\n        self.encodings = encodings\n        self.labels = labels\n\n    def __getitem__(self, idx):\n        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n        item['labels'] = torch.tensor(self.labels[idx]) #\n        return item\n\n    def __len__(self):\n        return len(self.labels)\n\n\ndef load_sentiment_classifier(model):\n    classifier = BertForSequenceClassification.from_pretrained(\n        # \"KB/bert-base-swedish-cased\", # Use the 12-layer BERT model, with a cased vocab.\n        model,\n        # You can increase this for multi-class tasks.\n        output_attentions=False,  # Whether the model returns attentions weights.\n        output_hidden_states=False,  # Whether the model returns all hidden-states.\n    )\n    return classifier\ndef load_token_classifier(model):\n    classifier = BertForTokenClassification.from_pretrained(\n        # \"KB/bert-base-swedish-cased\", # Use the 12-layer BERT model, with a cased vocab.\n        model,\n        # You can increase this for multi-class tasks.\n        output_attentions=False,  # Whether the model returns attentions weights.\n        output_hidden_states=False,  # Whether the model returns all hidden-states.\n    )\n    return classifier\n\n\ndef tokenize_data(data, tokenizer):\n    label = np.zeros(len(data))  # dummy labels are used so that the entire Data loader class does not need to be rewritten; they do not affect classification\n    label = torch.tensor(label, dtype=int)\n    encodings = tokenizer(list(data), truncation=True, padding=True, max_length= 512)\n    transformed_data = Dataset(encodings, label)\n    return transformed_data\n\n\ndef pred_frag(tokenized_data, classifier):\n    pred = np.array([])\n    ver_data_loader = DataLoader(tokenized_data, batch_size=2, shuffle=False)\n    for batch in tqdm(ver_data_loader):\n        batch = {k: v.to(device) for k, v in batch.items()}\n        # _, logits = classifier(**batch)  # run local\n        output = classifier(**batch)  # run MLab\n        logits = output.logits  # run MLab\n        p_soft_max = torch.softmax(logits, dim=1)[:, 1:].tolist()\n        # tmp_pred = p_soft_max[0]  # non scaled version\n        tmp_pred = [p_soft_max[0][0] * 0.75, p_soft_max[0][1]]  # scaled weak sentiment from [0 1] to [0 0.75]\n        pred = np.append(pred, tmp_pred)\n    return pred\n\n\ndef save_files(file):\n    errors = {}\n    success = False\n    if file and allowed_file(file.filename):\n        filename = secure_filename(file.filename)\n        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n        success = True\n    else:\n        errors['message'] = 'File extension is not allowed'\n\n    if success and errors:\n        errors['message'] = 'File successfully uploaded'\n        resp = jsonify(errors)\n        resp.status_code = 206\n        return resp\n    if success:\n        resp = jsonify({'message': 'File successfully uploaded', 'filename': filename})\n        resp.status_code = 201\n        return resp\n    else:\n        resp = jsonify(errors)\n        resp.status_code = 400\n        return resp\n\nUPLOAD_FOLDER = 'upload'\nALLOWED_EXTENSIONS = {'csv'}\n\nif not os.path.exists(\"upload\"):\n    os.makedirs(\"upload\")\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['MAX_CONTENT_LENGTH'] = 10 * 1024 * 1024  # max input file size is roughly 10mb\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Loading tokenizer\")\ntokenizer = BertTokenizerFast.from_pretrained(\"RecordedFuture/Swedish-Sentiment-Fear\")\nprint(f\"Loading Violence Sentiment model\")\nclassifier_violence = 
load_sentiment_classifier(\"RecordedFuture/Swedish-Sentiment-Violence\").to(device)\nprint(f\"Loading Fear Sentiment model\")\nclassifier_fear = load_sentiment_classifier(\"RecordedFuture/Swedish-Sentiment-Fear\").to(device)\nprint(f\"Loading Fear sentiment target model\")\nclassifier_fear_targets = load_token_classifier(\"RecordedFuture/Swedish-Sentiment-Fear-Targets\").to(device)\nprint(f\"Loading Violence sentiment target model\")\nclassifier_violence_targets = load_token_classifier(\"RecordedFuture/Swedish-Sentiment-Violence-Targets\").to(device)\n\ndef model_selector(setup):\n if setup[\"model\"] == \"fear\": # make the sentiment classifier selection\n classifier = classifier_fear\n elif setup[\"model\"] == \"violence\":\n classifier = classifier_violence\n elif setup[\"model\"] == \"fear_target\":\n classifier = classifier_fear_targets\n elif setup[\"model\"] == \"violence_target\":\n classifier = classifier_violence_targets\n return classifier\n\ndef input_source(setup):\n if setup['message']: # the text box is used\n message = setup[\"message\"]\n elif setup['filename'] and not setup['message']:\n message = pd.read_csv(f\"../Bert-app/upload/{setup['filename']}\", header=None, usecols=[0])\n # read the file name from the upload folder, use the first col as the data column\n else:\n return {\"message\": \"No data received in payload\", \"pred\": \"\"}\n return message\n\ndef prepare_data(setup, message):\n\n index_all = [] # var for storing the indexing\n index = 0 # start indexing at zero\n\n if setup['message']: # if the text message box is used it should take priority\n data_pred = []\n s_frag = message.split(\".\") # split all fragments on \".\", if \".\" not in frag nothing happens\n for s in s_frag: # loop through the list of split strings\n if s != \"\" and s != \" \": # if a string is not whitespace save it to data for eval\n data_pred.append(s)\n index_all.append(index) # append the indexing for max sorting\n message = pd.Series(message)\n elif not setup['message'] and setup[\"filename\"]: # double check to make sure that the tex box is not in use and a file is uploaded\n data_pred = []\n # message = pd.read_csv(f\"../Bert-app/upload/{setup['file']}\", header=None, usecols=[\n # 0]) # read the file name from the upload folder, use the first col as the data column\n if len(message) > 1: # different methods for handling if all the data is present in one csv cell or not, due to the list() method transorming each char to a seperate string in that case\n tmp_data = list(message.squeeze()) # transform DF to Series and format it as a list\n else:\n tmp_data = [message.squeeze()]\n for frag in tmp_data: # roll through all fragments from the file\n s_frag = frag.split(\".\") # split all fragments on \".\", if \".\" not in frag nothing happens\n for s in s_frag: # loop through the list of split strings\n if s != \"\" and s != \" \": # if a string is not whitespace save it to data for eval\n data_pred.append(s)\n index_all.append(index) # indexing for max sorting\n index += 1 # inc index after one cell is processed.\n else:\n print(\"No data in text window and no file uploaded\")\n return {\"message\": \"no_data_uploaded_or_in_text_area_\", \"pred\": 0}\n\n return data_pred, index_all\n\ndef predict(setup):\n\n message = input_source(setup)\n\n classifier = model_selector(setup)\n\n data_pred , index_all = prepare_data(setup, message)\n\n pred = [] # var for storing the predctions\n label = [] # var for storing the labels of the\n batches = chunks(data_pred, 1) # can probably batch it 
in larger than 1\n    for batch in batches:\n        tokenized_data = tokenize_data(batch, tokenizer)\n        tmp_pred = pred_frag(tokenized_data, classifier=classifier)\n        pred.append(round(np.max(tmp_pred), 2))\n        label.append(np.argmax(tmp_pred))\n\n    ### all post processing of the results should be done in the \"front end\".\n    ### the back end should only do the prediction and always return the results in the same format\n    data_disp = []\n    pred_disp = []\n    if setup['group_result'] == 'unsorted':  # if the data aggregation button selection is 'unsorted' just continue, all is formatted correctly already\n        data_disp = data_pred\n        pred_disp = pred\n\n    elif setup['group_result'] == 'sorted':  # if the button is set to sorted\n        if len(pred) < 2:  # if len of pred is 1 then just continue, you can't sort a single float\n            data_disp = data_pred\n            pred_disp = pred\n        else:  # sort all predictions in descending order and reorder the fragments to match\n            sorted_based_max_pred = np.array(pred).argsort()[:][::-1]  # sorted\n            pred_disp = [pred[i] for i in sorted_based_max_pred]  # format the predictions to make the output consistent\n            data_disp = [data_pred[i] for i in sorted_based_max_pred]  # format the data to make the output consistent\n    elif setup['group_result'] == 'max':  # if the button is set to max\n        if len(pred) < 2:  # if len of pred = 1 then do nothing since the max of a float is itself\n            data_disp = data_pred\n            pred_disp = pred\n        else:\n            pred_max = []\n            for uniq in np.unique(index_all):  # check all unique indexes\n                tmp_pred = []\n                for i, val in enumerate(index_all):  # check all available indexes from splitting\n                    if val == uniq:  # get the predictions with the same index\n                        tmp_pred.append(pred[i])\n                pred_max.append(np.max(tmp_pred))\n            pred_disp = pred_max\n\n            if len(message) > 1 and type(message).__name__ == 'DataFrame':  # different methods for handling if all the data is present in one csv cell or not, due to the list() method transforming each char to a separate string in that case\n                data_disp = list(message.squeeze())  # transform DF to Series and format it as a list\n            else:\n                data_disp = [message.squeeze()]\n\n\n    ret = {\"message\": data_disp,\n           \"pred\": pred_disp,\n           \"message_raw\": data_pred,\n           \"pred_raw\": pred,\n           \"index\": index_all}\n\n    return ret\n\[email protected]('/ping', methods = [\"GET\"])\ndef ping():\n    return jsonify({\"Status\":\"Server is live\"})\n\[email protected]('/')\ndef home():\n    return render_template('home.html')\n\[email protected]('/guide')\ndef guide():\n    return render_template('guide.html')\n\[email protected]('/echo/<message>',methods = [\"GET\"])\ndef echo(message):\n    print(f\"{message}\")\n    return {\"message\":message}\n\[email protected]('/api', methods=[\"POST\"])\ndef api():\n\n    setup = request.files['setup'].read().decode(\"utf-8\")\n    setup = json.loads(setup)\n\n    if 'eval_file' in request.files:  # check if \"eval_file\" is in request.files, only occurs when the \"eval_file\" input is used in a curl request\n        file = request.files['eval_file']  # take the uploaded file\n        resp = save_files(file)  # run it through the save_file function to save it in the upload folder\n        resp = resp.json  # read the response as JSON\n        setup['filename'] = resp['filename']\n    else:\n        setup['filename']= \"\"\n\n    resp = predict(setup)\n    return resp\n\[email protected]('/api/input',methods = [\"GET\"])\ndef api_input():\n    return jsonify({\"group_result\":[\"unsorted\",\"sorted\",\"max\"],\n                    \"model\":[\"fear\",\"violence\"],\n                    \"message\": \"any 
string\",\n \"eval_file\": \"@path/to/file.csv\"\n })\n\[email protected]('/pred_endpoint', methods=[\"POST\"])\ndef pred_endpoint():\n setup = {\n \"group_result\": request.form['group_result'],\n \"model\": request.form['model'],\n \"filename\": request.form['filename'],\n \"message\": request.form['message']\n }\n\n response = predict(setup)\n\n return response\n\[email protected]('/python-flask-files-upload', methods=['POST'])\ndef upload_file():\n # check if the post request has the file part\n if 'files[]' not in request.files:\n resp = jsonify({'message': 'No file part in the request'})\n resp.status_code = 400\n return resp\n\n files = request.files.getlist('files[]')\n for file in files:\n resp = save_files(file)\n\n return resp\n\n\nif __name__ == \"__main__\":\n # port = next_free_port()\n port = 3130\n l_host = f\"http://localhost:{port}/\"\n ipv4 = f\"http://{socket.gethostbyname(socket.gethostname())}:{port}/\"\n print(f\"\\n\\n\"\n f\"# Web app is hosted on port: {port}\\n\"\n f\"# To access the app go to\\n\"\n f\"# {l_host}\\n\"\n f\"# Or\\n\"\n f\"# {ipv4}\\n \\n\")\n print(f\"The link below is broken, follow the above steps to access the web app\")\n app.run(debug=False, host='0.0.0.0', port=port)\n\n" }, { "alpha_fraction": 0.390341579914093, "alphanum_fraction": 0.393521785736084, "avg_line_length": 41.45000076293945, "blob_id": "54ed1e2214795501033db0ccb3ac3bfed84e2eaf", "content_id": "f0c8a45869982635ba960ee9a4ea882eab6ddea8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8490, "license_type": "no_license", "max_line_length": 166, "num_lines": 200, "path": "/static/scripts.js", "repo_name": "aidotse/Sentiment-web-app", "src_encoding": "UTF-8", "text": "var result_data\nfilename = \"\"\nvar table_data\nvar classification_breakpoint = 0\n\n\nfunction clearBox(elementID){\n document.getElementById(elementID).innerHTML = \"\";\n };\n// initiate the result table \n$(document).ready(function(){\n $('#res_table').bootstrapTable({\n search: true,\n columns: [{\n field: 'message',\n title: 'Message',\n sortable: true\n }, {\n field: 'pred',\n title: 'Prediction',\n sortable: true\n },{\n field: 'clss',\n title: 'Classification',\n sortable: true\n }, ],\n \n data: table_data\n })\n })\n\n// init popovers \n$(document).ready(function(){\n $('[data-toggle=\"popover\"]').popover();\n})\n\nfunction convertToCSV(objArray) {\n var array = typeof objArray != 'object' ? 
JSON.parse(objArray) : objArray;\n    var str = '';\n\n    for (var i = 0; i < array.length; i++) {\n        var line = '';\n        for (var index in array[i]) {\n            if (line != '') line += ','\n\n            line += array[i][index];\n        }\n\n        str += line + '\\r\\n';\n    }\n\n    return str;\n    }\n\nfunction ajaxOnSubmit(){clearBox('info_box');\n    $(\".info_box\").append(`Processing the data`);}\nfunction ajaxOnError(){}\nfunction ajaxCallback(data){clearBox('info_box');\n    clearBox('msg');\n    result_data = data;\n    if ($(\"#model-select\").val() == 'violence'){\n        classification_breakpoint = 0.575}\n    else if ($(\"#model-select\").val() == 'fear'){\n        classification_breakpoint = 0.5};\n    if (data.message == \"no_data_uploaded_or_in_text_area_\"){\n        $(\".info_box\").append(`No data available`);\n        $(\"#res_table_div\").hide()\n    }\n    else{\n        // If prediction results are present, always load them in the table \n        var table_data = [];\n        var classification_all = [];\n        for (i = 0; i < data.message.length; i++){\n            // do classification for each prediction\n            if(data.pred[i] < classification_breakpoint){\n                // if pred < breakpoint \n                clss = 0\n            }\n            else{\n                // else classify as positive\n                clss = 1\n            }\n            new_line = {'message':data.message[i], 'pred':(data.pred[i]).toFixed(2), 'clss':clss};\n            classification_all.push(clss);\n            table_data[i]=new_line;\n        };\n\n        result_data.classification = classification_all;\n        $('#res_table').bootstrapTable('load', table_data);\n        $(\"#res_table_div\").show()\n    }\n    }\n    \n\nfunction exportJson(el) {\n    var data = \"text/json;charset=utf-8,\" + encodeURIComponent(JSON.stringify({\"message\":result_data.message,\"prediction\":result_data.pred}));\n    el.setAttribute(\"href\", \"data:\"+data);\n    el.setAttribute(\"download\", \"results.json\");}\n\nfunction exportCSV(el) { var csv_string = \"\";\n    for (i = 0; i < result_data.message.length; i++){\n        tt = result_data.message[i].replace(/,/g,';') // replace commas with semicolons (, -> ;) to avoid errors in the csv\n        csv_string += `${tt},${result_data.pred[i]}, ${result_data.classification[i]}\\r\\n`}\n    \n    var csv = csv_string\n\n    var blob = new Blob([csv], { type: 'text/csv;charset=utf-8;' });\n    if (navigator.msSaveBlob) { // IE 10+\n        navigator.msSaveBlob(blob, 'result.csv');\n    } else {\n        var link = document.createElement(\"a\");\n        if (link.download !== undefined) { // feature detection\n            // Browsers that support HTML5 download attribute\n            var url = URL.createObjectURL(blob);\n            link.setAttribute(\"href\", url);\n            link.setAttribute(\"download\", 'result.csv');\n            link.style.visibility = 'hidden';\n            document.body.appendChild(link);\n            link.click();\n            document.body.removeChild(link);\n        }\n    }\n    }\n\n\n\n// {# Modified version of ajax_form_submit for two buttons #}\n$(document).ready(function() {\n    // User Submit form ajax handling with button instead\n    $('#submitform').click(function (e) {\n        ajaxOnSubmit(); // DEFINE THIS FUNC\n        $.ajax({\n            type: \"POST\",\n            url: \"/pred_endpoint\",\n            data: {message: $(\"#message\").val(), \n                   model: $(\"#model-select\").val(),\n                   filename: filename, \n                   group_result: $(\"#results-select\").val()},\n            beforeSend: function(){\n                // Show image container\n                $(\"#loader\").show()},\n            success: function (data) {\n                ajaxCallback(data),\n                $(\"#loader\").hide(); // DEFINE THIS FUNC\n            },\n            error: function(ajaxResponse, errorStr) {\n                ajaxOnError(ajaxResponse, errorStr); // DEFINE THIS FUNC\n            },\n            timeout: 90*1000\n        });\n        e.preventDefault();\n    });\n});\n\n$(document).ready(function (e) {\n    $('#upload').on('click', function () {\n        var form_data = new FormData();\n        var ins = 
document.getElementById('multiFiles').files.length;\n\n        if(ins == 0) {\n            filename = \"\";\n            $('#msg').html('<span style=\"color:red\">Select at least one file</span>');\n            return;\n        }\n\n        if(ins > 1) {\n            filename = \"\";\n            $('#msg').html('<span style=\"color:red\">Select only one file</span>');\n            return;\n        }\n\n        for (var x = 0; x < ins; x++) {\n            form_data.append(\"files[]\", document.getElementById('multiFiles').files[x]);\n        }\n\n        $.ajax({\n            url: 'python-flask-files-upload', // point to server-side URL\n            dataType: 'json', // what to expect back from server\n            cache: false,\n            contentType: false,\n            processData: false,\n            data: form_data,\n            type: 'post',\n            success: function (response) { // display success response\n                filename = response.filename ;\n                clearBox('msg');\n                $('#msg').append(response.message + '<br/>');\n                clearBox('info_box');\n                $(\".info_box\").append(\`Will evaluate: \` + filename.bold());\n                $(\".info_box\").append(\` if the textbox is empty\`);\n            },\n            error: function (response) {\n                clearBox('msg');\n                // $('#msg').html(response.message); // display error response\n                $('#msg').append(response['responseJSON'].message);\n                return;\n            }\n        });\n    });\n});\n" }, { "alpha_fraction": 0.7427055835723877, "alphanum_fraction": 0.7625994682312012, "avg_line_length": 43.35293960571289, "blob_id": "d5857b524812c82fa6577eae9185b2428aeb212a", "content_id": "4975a07b82b18697af95f5b6f55d63b7083b60ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2262, "license_type": "no_license", "max_line_length": 207, "num_lines": 51, "path": "/README.md", "repo_name": "aidotse/Sentiment-web-app", "src_encoding": "UTF-8", "text": "## User guide to the app\nA user guide to the web app is available in the app. It is essentially the guide.txt with a bit of formatting. \n\n## Run \nThe web app can be run by pulling the repo. \n\nStart with installing the Python dependencies:\n\`pip install -r requirements.txt\`\n\nWhen the dependencies are installed, launch the app from the directory with the command:\n\`python app.py\`\n\nThe app will launch and start downloading the Bert models from huggingface; this step will take a while but should only need to be done once. \n\nThe app will be available at: \n* \`http://localhost:3130/\`\nOr through your IPv4 Address in your browser of choice; find your IPv4 Address by running \`ipconfig\` in \`CMD\` or the corresponding command for your OS. \n* \`http://IPv4 Address:3130/\`\n \n## Docker: \n#### Build\nThe web app can be packaged into a Docker container with the included Dockerfile by running \n\`docker build --tag name:tag\` \n\n### Pull from hub\nThis application is available from Docker Hub; the latest tag is the version currently available on GitHub. \n\nPull the image using the command \n* \`docker pull rffmoller/swedish-bert-web-app\`\nNo tag needs to be included; it defaults to the latest.\n\nTo run the container, use the following call: \n* \`docker run -p 3130:3130 rffmoller/swedish-bert-web-app\`\n\nThe \`-p 3130:3130\` flag maps the web app, which is always hosted on port 3130 inside the Docker container, to your local port 3130 (local port:docker port; you can use any local port you want).\n\nWhen the app has launched inside the container, you can access the web app in your browser of choice by writing: \n* \`http://localhost:3130/\`\n\nOr through your IPv4 Address, found by running \`ipconfig\` in \`CMD\` or the corresponding command for your OS. 
\n* `http://IPv4 Address:3130/`\n\n## Models:\nThe models used for the sentiment classification are available for download and standalone usage at:\n[Fear model](https://huggingface.co/RecordedFuture/Swedish-Sentiment-Fear) \n[Violence model](https://huggingface.co/RecordedFuture/Swedish-Sentiment-Violence)\n\nBefore running. Make sure you have a valid Bert model directory inside of the \"Bert-app\" dir. \nChange the name in the \"load_classifier\" function in app.py \n\nExcept for this the app should be able to run on any system\n" }, { "alpha_fraction": 0.7074829936027527, "alphanum_fraction": 0.7278911471366882, "avg_line_length": 23.66666603088379, "blob_id": "5e76de8c091fce9782790c6a25e0d0ea6f624035", "content_id": "9ca50cb849f8127a50eed3214dd6a35f20587195", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 147, "license_type": "no_license", "max_line_length": 35, "num_lines": 6, "path": "/Dockerfile", "repo_name": "aidotse/Sentiment-web-app", "src_encoding": "UTF-8", "text": "FROM python:3.8.6-slim\nWORKDIR /Bert-app\nADD . /Bert-app\nRUN pip install --upgrade pip \nRUN pip install -r requirements.txt\nCMD [\"python\",\"app.py\"]" } ]
6
leviresende/Treinamento-CNN-Hub-Inovacao-Vale-Carajas
https://github.com/leviresende/Treinamento-CNN-Hub-Inovacao-Vale-Carajas
581e2c122f640f143e3c3be5f3bc2b21f7dce332
caf63a103cacf8db8cc1769e56c2c13409493a0a
47ec168802bfde89c1a060dfde0e8f44141b5bd9
refs/heads/master
2020-09-26T06:20:16.909447
2019-12-03T16:18:55
2019-12-03T16:18:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7881730198860168, "alphanum_fraction": 0.7934686541557312, "avg_line_length": 32.32352828979492, "blob_id": "0f926913922c9b0b0c25e662fb15a5f8326dccd9", "content_id": "04f3695b836fc31036f42740356c7d00280856c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 203, "num_lines": 34, "path": "/README.md", "repo_name": "leviresende/Treinamento-CNN-Hub-Inovacao-Vale-Carajas", "src_encoding": "UTF-8", "text": "# Exemplo prático de CNN para o Hub de Inovacao da Vale em Carajas.\n\nExemplo prático de treinamento de uma rede neural convolucional para detecção de buracos em pistas por meio de imagens utilizando o framework pytorch.\n\n## Palestrante: \n\n### André Almeida Santos\n\nÉ um dos principais pesquisadores da área de percepção do projeto Rosi: Robô de Serviço de Inspeção, uma colaboração entre ITV, UFRJ, UFMG e Vale – Porto da Madeira.\n\nEngenheiro Eletricista pelo Instituto Federal da Bahia. É mestrando em Instrumentação, Controle e Automação de Processos de Mineração pelo Instituto Tecnológico Vale e Universidade Federal de Ouro Preto.\n\n\n### Email:\n\[email protected]\n\n## Biliotecas Necessárias\n\nAs bibliotecas necessárias para rodar os códigos estão listadas abaixo:\n\n\nmatplotlib\n\nnumpy\n\nos\n\n\nAs bibliotecas torch e torvision também devem ser instaladas e podem ser baixadas de acordo o seu sistema. O comando de instalação pode ser gerado diretamente no site https://pytorch.org/\n\n## Conjunto de Imagens\n\nAs imagens foram coletadas do google e redimensionadas para um tamanho de 224 x 224 pixels, padronizando o tamanho da imagem para o input do sistema.\n" }, { "alpha_fraction": 0.537960946559906, "alphanum_fraction": 0.5553145408630371, "avg_line_length": 37.22279739379883, "blob_id": "0e10e24b0eaaa4573c5cb653098c0ea0195df484", "content_id": "e68f6a61a3253b510bf1a91c8c1ab14339989e7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7427, "license_type": "no_license", "max_line_length": 98, "num_lines": 193, "path": "/cnnBuracos.py", "repo_name": "leviresende/Treinamento-CNN-Hub-Inovacao-Vale-Carajas", "src_encoding": "UTF-8", "text": "# Imports necessários\n# *****************************************************************************\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torchvision import datasets\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\n# Transformações nas imagens para processamento:\n# Transformar para tensor\n# Normalizar os canais RGB com uma média e desvio padrão de 0.5\n# *****************************************************************************\ndata_transforms = {\n 'treinamento': transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ]),\n 'teste': transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n ])\n}\n\n# Caminho das pastas de imagens para treinamento e teste\n# Mude este caminho de acordo o seu sistema!!!\n# *****************************************************************************\ndata_dir = 'D:\\Documentos\\Vale - Hub Inovação\\ImagensCNNBuracos'\n\n# Criação dos conjuntos de dados\n# *****************************************************************************\nimage_datasets = {x: 
datasets.ImageFolder(os.path.join(data_dir, x),\n data_transforms[x])\n for x in ['treinamento', 'teste']}\n\n# Agrupamento das imagens em um dataloader para processamento\n# Pense nisso como um container com as imagens agrupadas em lotes\n# *****************************************************************************\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=5,\n shuffle=True, num_workers=1)\n for x in ['treinamento', 'teste']}\n\n# Informações úteis dos conjuntos para serem mostradas no terminal\n# *****************************************************************************\ndataset_sizes = {x: len(image_datasets[x]) for x in ['treinamento', 'teste']}\nprint(dataset_sizes)\nclass_names = image_datasets['treinamento'].classes\nprint(class_names)\n\nprint('Quantidade de imagens para treinamento: ', dataset_sizes['treinamento'])\nprint('Quantidade de imagens para teste: ', dataset_sizes['teste'])\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f'Device: {device}')\n\n\n# Função para visualizar algumas imagens\n# *****************************************************************************\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.5, 0.5, 0.5])\n std = np.array([0.5, 0.5, 0.5])\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(5) # pause a bit so that plots are updated\n\n\n# Busca um batch de imagens do grupo treinamento\n# As informações das imagens e das respectivas legendas são carregadas nas variáveis\ninputs, classes = next(iter(dataloaders['treinamento']))\n\n# Criação de um grid a partir do batch obtido acima\nout = torchvision.utils.make_grid(inputs, nrow=5)\n\n# Mostra as imagens do batch usando a função imshow()\nimshow(out, title=[class_names[x] for x in classes])\n\n\n# Classe que define a estrutura da rede CNN com as camadas de convolução \n# e classificação definidas de acordo a nossa escolha\n# *****************************************************************************\nclass Net(nn.Module):\n \n # Função inicial que define as camadas da rede\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=5)\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)\n self.fc1 = nn.Linear(in_features=16 * 53 * 53, out_features=120)\n self.fc2 = nn.Linear(in_features=120, out_features=84)\n self.fc3 = nn.Linear(in_features=84, out_features=2)\n\n # Função necessária que define a passagem dos dados pela rede\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 53 * 53)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n# Criação do objeto da rede\n# *****************************************************************************\nnet = Net()\n\n# Criação dos critérios de treinamento: Função de erro e otimizador\n# Erro: CrossEntropyLoss\n# Otimizados: stochastic gradient descent\n# *****************************************************************************\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n\n\n# Loop de treinamento da rede \n# (Poder ser encapsulado em uma função!)\n# *****************************************************************************\nfor epoch in 
range(15): # loop over the dataset multiple times (Épocas)\n\n running_loss = 0.0\n for i, data in enumerate(dataloaders['treinamento'], 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 5 == 0: # print every 5 mini-batches\n print(f'[Epoch: {epoch + 1}, Batch: {i + 1}] loss: {running_loss / 5 :.5f}')\n running_loss = 0.0\n\nprint('Finished Training')\n\n# Etapa para salvar o modelo da rede treinanda\n# Mude este caminho de acordo o seu sistema!!!\n# *****************************************************************************\nPATH = 'D:\\Documentos\\Vale - Hub Inovação\\ImagensCNNBuracos/rede.pth'\ntorch.save(net.state_dict(), PATH)\n\n# Utilização da função imshow() para mostrar um batch do conjunto de teste\n# *****************************************************************************\ndataiter = iter(dataloaders['teste'])\nimages, labels = dataiter.next()\n\n# Mostra as legendas das imagens de teste\nimshow(torchvision.utils.make_grid(images))\nprint('GroundTruth: ', ' '.join('%5s' % class_names[labels[j]] for j in range(5)))\n\n# Carregamento da rede salva para classificar as imagens do batch de teste\n# *****************************************************************************\nnet = Net()\nnet.load_state_dict(torch.load(PATH))\n\noutputs = net(images)\n\n_, predicted = torch.max(outputs, 1)\n\n# Mostra o resultado das classificações das imagens do conjunto de teste\nprint('Predicted: ', ' '.join('%5s' % class_names[predicted[j]]\n for j in range(5)))\n\n# Loop para classificar todas as imagens de teste e verificar a taxa de acerto\n# *****************************************************************************\ncorrect = 0\ntotal = 0\nwith torch.no_grad():\n for data in dataloaders['teste']:\n images, labels = data\n outputs = net(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\nprint(f'Taxa de classificações corretas para as 60 imagens de teste: {(100 * correct / total)} %')" } ]
2
r0rishav0v/Machine-learning
https://github.com/r0rishav0v/Machine-learning
6383b6b8fcff5ec8b4a40848c74e3d052831cfc3
d57bcd26da2c850b7f3bb0752876063ec4da60ce
077104eb27c66d889c2b7e4317b5fa49a719963d
refs/heads/master
2021-08-21T20:40:48.420266
2021-07-08T20:03:52
2021-07-08T20:03:52
243,459,290
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5064392685890198, "alphanum_fraction": 0.5137487053871155, "avg_line_length": 29.25, "blob_id": "3b82310d64af7524dfbdf332d9f5597cff07a461", "content_id": "7e23d2c486d411c0865f3f4c1219261e604e8d05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2873, "license_type": "no_license", "max_line_length": 83, "num_lines": 92, "path": "/Linear_Regression_Gradient_Descent.py", "repo_name": "r0rishav0v/Machine-learning", "src_encoding": "UTF-8", "text": "import numpy as np\r\n#from numba import jit, cuda\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass LinearRegression() :\r\n    \r\n    def __init__( self, learning_rate) :\r\n        self.learning_rate = learning_rate\r\n    \r\n    #@cuda.jit \r\n    def fit( self, X, Y ) :\r\n        self.m, self.n = X.shape\r\n        self.W = np.zeros( self.n )\r\n        self.b = 0\r\n        self.X = X\r\n        self.Y = Y\r\n        count = 0\r\n        \r\n        Y_pred = self.predict( self.X )\r\n        Loss = sum(np.sqrt((Y - Y_pred)**2))/len(Y)\r\n        print(\"First Loss\")\r\n        print(Loss)\r\n        self.update_weights()\r\n        Y_pred = self.predict( self.X )\r\n        New_Loss = sum(np.sqrt((Y - Y_pred)**2))/len(Y)\r\n        print(\"Second Loss\")\r\n        print(New_Loss)\r\n        while(New_Loss < Loss):\r\n            Loss = New_Loss\r\n            self.update_weights()\r\n            Y_pred = self.predict( self.X )\r\n            New_Loss = sum(np.sqrt((Y - Y_pred)**2))/len(Y)\r\n            print(\"Loss\")\r\n            print(New_Loss)\r\n            count = count + 1\r\n            print(count)\r\n        return self\r\n    \r\n    # Helper function to update weights in gradient descent\r\n    \r\n    def update_weights( self ) :\r\n        Y_pred = self.predict( self.X )\r\n        # calculate gradients \r\n        dW = - ( 2 * ( self.X.T ).dot( self.Y - Y_pred ) ) / self.m\r\n        db = - 2 * np.sum( self.Y - Y_pred ) / self.m \r\n        # update weights\r\n        self.W = self.W - self.learning_rate * dW\r\n        self.b = self.b - self.learning_rate * db\r\n        return self\r\n    \r\n    # Hypothetical function h( x ) \r\n    \r\n    def predict( self, X ) :\r\n        return X.dot( self.W ) + self.b\r\n\t\t\r\n\t\t\r\n\r\ndef main() :\r\n    \r\n    df = pd.read_csv('W:/Kaggle/References-master/References-master/Fake_data.csv')\r\n    X = df.iloc[:,:-1].values\r\n    Y = df.iloc[:,5].values\r\n    # Splitting dataset into train and test set\r\n    X_train, X_test, Y_train, Y_test = train_test_split( \r\n    X, Y, test_size = 1/3, random_state = 0 )\r\n    # Model training\r\n    model = LinearRegression(learning_rate = 0.025 )\r\n    model.fit( X_train, Y_train )\r\n    # Prediction on test set\r\n    Y_pred = model.predict( X_test )\r\n    print( \"Predicted values \", np.round( Y_pred[:3], 2 ) ) \r\n    print( \"Real values \", Y_test[:3] )\r\n    print( \"Trained W \", model.W) \r\n    print( \"Trained b \", round( model.b, 2 ) )\r\n    # Visualization on test set \r\n    print(Y_test)\r\n    print(Y_pred)\r\n    #print(model.Y_pred)\r\n    \r\n    print(df)\r\n    #plt.scatter( X_test, Y_test, color = 'blue' )\r\n    #plt.plot( X_test, Y_pred, color = 'orange' )\r\n    #plt.title( 'Salary vs Experience' )\r\n    #plt.xlabel( 'Years of Experience' )\r\n    #plt.ylabel( 'Salary' )\r\n    #plt.show()\r\n    \r\nif __name__ == \"__main__\" : \r\n    main()" }, { "alpha_fraction": 0.5865580439567566, "alphanum_fraction": 0.5940257906913757, "avg_line_length": 32.25581359863281, "blob_id": "ba9e533b2d17d1fcfccc380b2dfb5eb6a848e6ef", "content_id": "6d605cfdafc15202104c9f0a8d049ff77fe73dc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1473, "license_type": "no_license", "max_line_length": 96, 
"num_lines": 43, "path": "/One_way_Anova.py", "repo_name": "r0rishav0v/Machine-learning", "src_encoding": "UTF-8", "text": "# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\nimport pandas as pd\r\n\r\n\r\n# Computes a one-way ANOVA F ratio (the name is a misnomer; this is not the Kruskal-Wallis test).\r\ndef Kushkal_Wallis(df):\r\n    df.columns = ['Categorical','Numerical']\r\n    unique_categories = df['Categorical'].unique()\r\n    my_dict = dict()\r\n    k=0\r\n    for i in unique_categories:\r\n        Data = df[df['Categorical']==i]\r\n        # my_dict[i] holds [group total, group size] for each category\r\n        my_dict[i] = [sum(Data['Numerical']),Data['Numerical'].count()]\r\n    # correction factor C = (grand total)**2 / N\r\n    C = (sum(df['Numerical'])**2) / df['Numerical'].count()\r\n    # SS_Total = sum of squared observations - C\r\n    for x in df['Numerical']:\r\n        k = k + x**2\r\n    SS_Total = k-C\r\n    k = 0\r\n    # SS_Between = sum over groups of (group total**2 / group size) - C\r\n    for i in unique_categories:\r\n        z = (my_dict[i][0]**2)/my_dict[i][1]\r\n        k= k+z\r\n    SS_Between = k - C\r\n    SS_within = SS_Total - SS_Between\r\n    DF_Total = df['Numerical'].count() - 1\r\n    DF_BTW = len(unique_categories) - 1\r\n    DF_within = DF_Total - DF_BTW\r\n    MSS_BTW = SS_Between/DF_BTW\r\n    MSS_WITHIN = SS_within/DF_within\r\n    F_ratio = MSS_BTW/MSS_WITHIN\r\n    return my_dict, F_ratio, DF_BTW,DF_within\r\n\r\n# Press the green button in the gutter to run the script.\r\nif __name__ == '__main__':\r\n    # NOTE: assumes a DataFrame named Data (with SaleCondition and SalePrice columns) was loaded beforehand.\r\n    z,c,df_btw,df_within = Kushkal_Wallis(Data[['SaleCondition','SalePrice']])\r\n    print(\"F ratio : \", c)\r\n\r\n\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n" } ]
2
warleyzee/crud_api_python
https://github.com/warleyzee/crud_api_python
c22dca1f2ca1f8d2b3e95def8ff29f8cf32e629c
4598cf2a045f9b23c0e881f2151ce2c47a54b577
5bf90649c704fcc913e8732742da4b41d2485b6f
refs/heads/main
2023-06-23T06:59:35.598996
2021-07-19T00:07:55
2021-07-19T00:07:55
387,289,228
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6525307893753052, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 21.875, "blob_id": "e492df7b415c48f89b1ab68457f9df54227335bf", "content_id": "9b658cced066c0c630873e7ec716f7647f69c87c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "no_license", "max_line_length": 63, "num_lines": 32, "path": "/chamada_api_python/consumers/obter_tokem.py", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "from os import write\nimport requests\nfrom requests import status_codes\nfrom pprint import pprint\n\nfrom requests.models import Response\n_print = print\nprint = pprint\n\nurl = 'http://127.0.0.1:3001/tokens'\n\ndata_token = {\n \"email\": \"[email protected]\",\n \"password\": \"senha_Valida\"\n}\n\nresponse = requests.post(url= url, json= data_token)\n\nif response.status_code >= 200 and response.status_code <= 299:\n print(response.status_code)\n print(response.reason)\n print(response.json())\n\n response_data = response.json()\n token = response_data['token']\n with open ('token.txt', 'w') as file:\n file.write(token)\nelse:\n print(\"ERRO!!!\")\n print(response.status_code)\n print(response.reason)\n print(response.json())" }, { "alpha_fraction": 0.7909516096115112, "alphanum_fraction": 0.7909516096115112, "avg_line_length": 57.272727966308594, "blob_id": "8f8e7dbf61cf9b32f1241132d9197ac3dc0dba30", "content_id": "3cc459a350dcba2b792f82318e31e9acefa7d903", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 660, "license_type": "no_license", "max_line_length": 234, "num_lines": 11, "path": "/README.md", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "### Termos e acordos\n\nAo iniciar este projeto, você concorda com as diretrizes do Código de Ética e Conduta e do Manual da Pessoa Estudante de Python.\n\n# Boas vindas ao repositório CRUD com API em Python!\n\nVocê já usa o GitHub diariamente para desenvolver os exercícios, certo? Agora, para desenvolver os projetos, você deverá seguir as instruções a seguir. Fique atento a cada passo, e se tiver qualquer dúvida, nos envie por Slack! 
#vqv 🚀\n\nAqui você vai encontrar os detalhes de como estruturar o desenvolvimento do seu projeto a partir deste repositório, utilizando uma branch específica e um Pull Request para colocar seus códigos.\n\n---\n" }, { "alpha_fraction": 0.6790299415588379, "alphanum_fraction": 0.703281044960022, "avg_line_length": 23.20689582824707, "blob_id": "5790ba820fc5be3edab53515c1f9587de40175fa", "content_id": "a2797d40bf2e621c374f7934494c218da9551a70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 156, "num_lines": 29, "path": "/chamada_api_python/consumers/get_aluno.py", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "import requests\nfrom requests import status_codes\nfrom pprint import pprint\nfrom get_token import token\n\n_print = print\npprint = print\n\nurl = \"http://127.0.0.1:3001/alunos/1\"\n\nheaders = {\n \"Authorization\": token\n}\n\nresponse = requests.get(url=url, headers=headers)\n\n\n\nif response.status_code >= 200 and response.status_code <= 299:\n print(response.status_code)\n print(response.reason)\n\n response_data = response.json()\n print(response_data['nome'], response_data['sobrenome'], response_data['email'], response_data['idade'], response_data['peso'], response_data['altura'])\nelse:\n print(\"ERROR!!!\")\n print(response.status_code)\n print(response.reason)\n print(response.json())" }, { "alpha_fraction": 0.606789231300354, "alphanum_fraction": 0.6492220759391785, "avg_line_length": 20.42424201965332, "blob_id": "bf2f5733a7a6a396111b01e3b25db371484630d4", "content_id": "c0bb36b75038462760af735297b363815391daf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "no_license", "max_line_length": 67, "num_lines": 33, "path": "/chamada_api_python/consumers/criar_aluno.py", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "import requests\nfrom requests import status_codes\nfrom pprint import pprint\nfrom get_token import token\n_print = print\npprint = print\n\nurl = \"http://127.0.0.1:3001/alunos\"\n\naluno_data = {\n \"nome\" : \"Villa\",\n \"sobrenome\" : \"Nova\",\n \"email\" : \"[email protected]\",\n \"idade\" : 29,\n \"peso\" : \"110.05\",\n \"altura\" : \"1.81\"\n}\n\nheaders = {\n 'Authorization' : token\n}\n\nresponse = requests.post(url=url, json=aluno_data, headers=headers)\n\nif response.status_code >= 200 and response.status_code <= 299:\n print(response.status_code)\n print(response.reason)\n print(response.json())\nelse:\n print(\"ERROR!!!\")\n print(response.status_code)\n print(response.reason)\n print(response.json())\n" }, { "alpha_fraction": 0.6529605388641357, "alphanum_fraction": 0.6792762875556946, "avg_line_length": 20.75, "blob_id": "ae9544d063f52cae18956ae5e389eae54dcd4f21", "content_id": "a551a8b375b9c1f0279fe661093b030f0c18cde3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 63, "num_lines": 28, "path": "/chamada_api_python/consumers/cria_user.py", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "import requests\nfrom requests import status_codes\nfrom pprint import pprint\n_print = print\nprint = pprint\n\nfrom requests import status_codes\n\nurl = 'http://127.0.0.1:3001/users'\n\nuser_data = {\n \"nome\":\"Vila\",\n \"password\": \"123456\",\n \"email\": 
\"[email protected]\"\n}\n\nresponse = requests.post(url=url, json=user_data)\n\nif response.status_code >= 200 and response.status_code <= 299:\n print(response.status_code)\n print(response.reason)\n print(response.text)\n print(response.json())\nelse:\n print(\"ERROR!!!!\")\n print(response.status_code)\n print(response.reason)\n print(response.json())" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.720703125, "avg_line_length": 21.30434799194336, "blob_id": "104790723a4938f8f2e1bfa001b6814b998eed12", "content_id": "d033ffaee187e44fffd920cdb512ac73575a0a34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 512, "license_type": "no_license", "max_line_length": 63, "num_lines": 23, "path": "/chamada_api_python/consumers/delete_aluno.py", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "import requests\nfrom requests import status_codes\nfrom get_token import token\nfrom pprint import pprint\n_print = print\npprint = print\n\nurl = 'http://127.0.0.1:3001/alunos/1'\n\nheaders = {\n \"Authorization\": token\n}\n\nresponse = requests.delete(url=url, headers=headers)\n\nif response.status_code >= 200 and response.status_code<= 299:\n print(response.status_code)\n print(response.reason)\n print(response.json())\nelse:\n print(response.status_code)\n print(response.reason)\n print(response.json())" }, { "alpha_fraction": 0.6143884658813477, "alphanum_fraction": 0.6589928269386292, "avg_line_length": 20.090909957885742, "blob_id": "d46aa8d4f25f06e8da5944e7048a441f8df1fec7", "content_id": "b4f07db4ba36629fabbb67a37061f7f52f6b8167", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 695, "license_type": "no_license", "max_line_length": 69, "num_lines": 33, "path": "/chamada_api_python/consumers/put_aluno.py", "repo_name": "warleyzee/crud_api_python", "src_encoding": "UTF-8", "text": "import requests\nfrom requests import status_codes\nfrom get_token import token\nfrom pprint import pprint\n_print = print\npprint = print\n\nurl = 'http://127.0.0.1:3001/alunos/3'\n\natualiza_data = {\n \"nome\" : \"rodrigo\",\n \"sobrenome\" : \"Villa\",\n \"email\" : \"[email protected]\",\n \"idade\" : 40,\n \"peso\" : \"80.5\",\n \"altura\" : \"1.77\"\n}\n\nheaders = {\n\n \"Authorization\" : token\n}\n\nresponse = requests.put(url=url, json=atualiza_data, headers=headers)\n\nif response.status_code >= 200 and response.status_code <= 299:\n print(response.status_code)\n print(response.reason)\n print(response.json())\nelse:\n print(response.status_code)\n print(response.reason)\n print(response.json())" } ]
7
maverick1313/DesarrolloWebEvidencia
https://github.com/maverick1313/DesarrolloWebEvidencia
9173f3904d231f793fd3fd523417f5ba1ca40ccd
0ef343a716a822cc2b0a3373fd7d16187cd46989
2d4147691e1d8aa1c43adba572d74cc9e84575a8
refs/heads/master
2021-08-24T09:15:18.622437
2017-12-09T00:46:38
2017-12-09T00:46:38
113,525,398
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3945502042770386, "alphanum_fraction": 0.4050018787384033, "avg_line_length": 28.10869598388672, "blob_id": "ae9def80ac3739dc4fb33f324d660aa0f9ab3ffb", "content_id": "c4b87be0461b343e37e766c0877e5887b1ad79c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2679, "license_type": "no_license", "max_line_length": 102, "num_lines": 92, "path": "/fundacion_(web)/app/getpublicdata.js", "repo_name": "maverick1313/DesarrolloWebEvidencia", "src_encoding": "UTF-8", "text": "\nfunction getData() \n{\n\n    //alert ( \"estoy entrando a get Data\");\n\n\tsessionStorage.filial = \"puebla\";\n\n    jQuery.support.cors = true;\n    try\n    {    \n     $.ajax({\n        url: \"/gettweets\",\n        dataType: 'json',\n        cache: false,\n        contentType: false,\n        processData: true,\n        data: {filial: sessionStorage.filial}, \n        type: 'get',\n        crossDomain: true,\n        success: function(response) {\n\t    tweets = response;\n            //alert(response);\n            tweets.forEach(function (tweet) \n            {\n                var nombre = \"<div class='col-md-3 col-sm-3 wow fadeInUp' \" +\n                \t\t\t\" data-wow-delay='0.2s'> \" +\n                    \"<img src='\" + tweet.urlImage + \"'\" +\n                    \" class='img-responsive img-circle' alt='team img' height='150' width='150'\" +\n                    \" >\" +\n                    \" <div class='section-title wow bounceIn'> \" +\n                    \"<h3>\" + tweet.title + \"</h3>\" +\n                    \"<h5>\" + tweet.description + \"</h5>\" +\n                    \"</div>\" +\n                    \"</div>\" \n                $(\"#tweets\").append(nombre);\n            });\n\t    getMedicinas()\n        \t}\n    }); \n    \n    }\n    catch(e)\n    {\n        alert(\"error : \" + e);\n    }\n}\n\nfunction getMedicinas() \n{\n\n    //alert ( \"estoy entrando a get Data\");\n\n    sessionStorage.filial = \"puebla\";\n\n    jQuery.support.cors = true;\n    try\n    {    \n     $.ajax({\n        url: \"/getmedicinas\",\n        dataType: 'json',\n        cache: false,\n        contentType: false,\n        processData: true,\n        data: {filial: sessionStorage.filial}, \n        type: 'get',\n        crossDomain: true,\n        success: function(response) {\n            medicinas = response;\n            //alert(response);\n            medicinas.forEach(function (medicina) \n            {\n                var nombre = \"<div class='col-md-3 col-sm-3 wow fadeInUp' \" +\n                \" data-wow-delay='0.2s'> \" +\n                \"<img src='\" + medicina.urlImage + \"'\" +\n                \" class='img-responsive img-circle' alt='team img' height='150' width='150'\" +\n                \" >\" +\n                \" <div class='section-title wow bounceIn'> \" +\n                \"<h3>\" + medicina.title + \"</h3>\" +\n                \"<h5>\" + medicina.description + \"</h5>\" +\n                \"</div>\" +\n                \"</div>\" \n                $(\"#medicinas\").append(nombre);\n            });\n        }\n        }); \n    \n    }\n    catch(e)\n    {\n        alert(\"error : \" + e);\n    }\n}\n" }, { "alpha_fraction": 0.5867490172386169, "alphanum_fraction": 0.5881527066230774, "avg_line_length": 33.931373596191406, "blob_id": "095c45f318e515cdcf330eadaf6a1a97fce9956a", "content_id": "83c8ab944690656f66461d9927773c1c3fd79823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3562, "license_type": "no_license", "max_line_length": 131, "num_lines": 102, "path": "/fundacion_(androidstudioproject)/app/src/main/java/itesm/tweet/MytweetActivity.java", "repo_name": "maverick1313/DesarrolloWebEvidencia", "src_encoding": "UTF-8", "text": "package itesm.tweet;\n\nimport android.support.v7.app.AppCompatActivity;\nimport android.os.Bundle;\n\nimport android.content.Intent;\nimport android.media.session.MediaSession;\nimport android.os.AsyncTask;\nimport android.support.v7.app.AppCompatActivity;\nimport android.os.Bundle;\nimport android.view.View;\nimport android.widget.Button;\nimport android.widget.EditText;\nimport 
android.widget.Toast;\n\nimport itesm.SyncTaskClass.tweetSyncTask;\nimport itesm.tweet_api.model.MessagesCodeMessage;\nimport itesm.tweet_api.model.MessagesTweetInput;\n\nimport java.util.concurrent.ExecutionException;\n\npublic class MytweetActivity extends AppCompatActivity {\n\n Button btnTweet;\n EditText edtTitle;\n EditText edtDescription;\n EditText edtUrlImage;\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n setContentView(R.layout.activity_mytweet);\n\n btnTweet = (Button) findViewById(R.id.btnTweet);\n edtTitle = (EditText) findViewById(R.id.edtTitle);\n edtDescription = (EditText) findViewById(R.id.edtDescription);\n edtUrlImage = (EditText) findViewById(R.id.edtUrlImage);\n edtUrlImage.setText(\"https://adsoft-iosclient.appspot.com/images/team-img3.jpg\");\n\n\n btnTweet.setOnClickListener(new View.OnClickListener() {\n\n @Override\n public void onClick(View v) {\n\n String title = edtTitle.getText().toString().trim();\n String description = edtDescription.getText().toString().trim();\n String urlImage = edtUrlImage.getText().toString().trim();\n\n\n if ((title.length() == 0) || (description.length() == 0))\n {\n Toast.makeText(MytweetActivity.this,\n \"Necesitas ingresar tu titulo y descripcion.\",\n Toast.LENGTH_SHORT).show();\n return;\n }\n\n Intent intent = getIntent();\n String token = intent.getStringExtra(\"Token\");\n\n\n Toast.makeText(MytweetActivity.this, title + \" , \" + description + \" \" + token,\n Toast.LENGTH_LONG).show();\n\n String[] params = {title, description, token, urlImage};\n Toast.makeText(MytweetActivity.this, \"title: \" + title + \" description: \" + description, Toast.LENGTH_LONG).show();\n\n AsyncTask<String, Void, MessagesCodeMessage> execute =\n new tweetSyncTask(MytweetActivity.this).execute(params);\n String Message = new String();\n\n\n\n /*LoginTask(LoginActivity.this).execute(params);*/\n try {\n Message = execute.get().getMessage();\n //Toast.makeText(LoginActivity.this,\"Token: \"+execute.get().getToken(),Toast.LENGTH_SHORT).show();\n } catch (InterruptedException e) {\n e.printStackTrace();\n } catch (ExecutionException e){\n e.printStackTrace();\n }\n finally\n {\n\n if(Message != null) {\n\n Toast.makeText(MytweetActivity.this,\" Message: \"+ Message,Toast.LENGTH_SHORT).show();\n\n //Intent myIntent = new Intent(MytweetActivity.this, MytweetActivity.class);\n\n //myIntent.putExtra(\"Message: \", Message);\n //startActivity(intent);\n }\n\n }\n }\n });\n\n }\n}" }, { "alpha_fraction": 0.6831368207931519, "alphanum_fraction": 0.6872936487197876, "avg_line_length": 40.669532775878906, "blob_id": "006fe67e50c3d7c64708d21fad813b6e1660db63", "content_id": "81861b00a54e6f5d2bf5709244781f313ebeddb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 33920, "license_type": "no_license", "max_line_length": 221, "num_lines": 814, "path": "/fundacion_web/web_token_api.py", "repo_name": "maverick1313/DesarrolloWebEvidencia", "src_encoding": "UTF-8", "text": "import endpoints\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import app_identity\nfrom protorpc import remote\n\nimport jwt\nimport time\n\nfrom CustomExceptions import NotFoundException\n\nfrom messages import EmailPasswordMessage, TokenMessage, CodeMessage, Token, TokenKey,MessageNone\nfrom messages import FilialInput, FilialUpdate, FilialList\nfrom messages import TweetInput, TweetUpdate, TweetList\nfrom messages import UserInput, UserUpdate, 
UserList\nfrom messages import ProductInput, ProductUpdate, ProductList\nfrom messages import MedicinaInput, MedicinaUpdate, MedicinaList\nfrom messages import ViveresInput, ViveresUpdate, ViveresList\n\nfrom endpoints_proto_datastore.ndb import EndpointsModel\n\nimport models\nfrom models import validarEmail\nfrom models import Filial, Usuarios, Tweet, Product, Medicina, Viveres\n\n\n###############\n# Product\n###############\[email protected](name='products_api', version='v1', description='products endpoints')\nclass ProductsApi(remote.Service):\n###############get the info of one########\n# insert\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(ProductInput, CodeMessage, path='product/insert', http_method='POST', name='product.insert')\n#siempre lleva cls y request\n def product_add(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id']) #obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de\n myproduct = Product()\n if myproduct.product_m(request, user.key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=codigo, message='Product added')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n ########################## product list ###################\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(Token, ProductList, path='products/list', http_method='POST', name='product.list')\n def product_list(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret') #checa token\n user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = ProductList(code=1) # crea objeto mensaje\n lstBd = Product.query().fetch() # recupera de base de datos\n for i in lstBd: # recorre\n lista.append(ProductUpdate(token='',\n entityKey=i.entityKey,\n #filial_key=user.filial_key.urlsafe(),\n code=i.code,\n description=i.description,\n urlImage=i.urlImage)) # agrega a la lista\n \n lstMessage.data = lista # la manda al messa\n message = lstMessage #regresa\n \n except jwt.DecodeError:\n message = ProductList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = ProuctList(code=-2, data=[]) #token expiro\n return message\n\n###############get the info of one########\n @endpoints.method(TokenKey, ProductList, path='products/get', http_method='POST', name='product.get')\n def product_get(cls, request):\n try: \n token = jwt.decode(request.tokenint, 'secret') #checa token\n productentity = ndb.Key(urlsafe=request.entityKey)\n product = Product.get_by_id(productentity.id()) #obtiene usuario\n #user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = ProductList(code=1) # crea objeto mensaje\n lista.append(ProductUpdate(token='', \n entityKey= product.entityKey,\n #filial_key = user.filial_key.urlsafe(),\n code = product.code,\n description=product.description,\n urlImage=product.urlImage))\n lstMessage.data = lista#ASIGNA a la salida la lista\n message = lstMessage\n except jwt.DecodeError:\n message = ProductList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = 
ProductList(code=-2, data=[]) #token expiro\n return message\n\n# delete\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TokenKey, CodeMessage, path='products/delete', http_method='POST', name='products.delete')\n #siempre lleva cls y request\n def product_remove(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n productentity = ndb.Key(urlsafe=request.entityKey)#Obtiene el elemento dado el EntitKey\n productentity.delete()#BORRA\n message = CodeMessage(code=1, message='Succesfully deleted')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n##update##\n# update\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(ProductUpdate, CodeMessage, path='products/update', http_method='POST', name='products.update')\n#siempre lleva cls y request\n def product_update(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de USUARIOS\n #filialkey = ndb.Key(urlsafe=user.filial_key.urlsafe())#convierte el string dado a entityKey\n product = Product()\n if product.product_m(request, user.key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=1, message='Sus cambios han sido guardados exitosamente')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n###############\n# Medicina\n###############\[email protected](name='medicinas_api', version='v1', description='medicinas endpoints')\nclass MedicinasApi(remote.Service):\n###############get the info of one########\n# insert\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(MedicinaInput, CodeMessage, path='medicina/insert', http_method='POST', name='medicina.insert')\n#siempre lleva cls y request\n def medicina_add(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id']) #obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de\n mymedicina = Medicina()\n if mymedicina.medicina_m(request, user.filial_key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=codigo, message='Medicine added')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n ########################## medicines list ###################\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(Token, MedicinaList, path='medicinas/list', http_method='POST', name='medicina.list')\n def medicina_list(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret') #checa token\n user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = MedicinaList(code=1) # crea 
objeto mensaje\n lstBd = Medicina.query().fetch() # recupera de base de datos\n for i in lstBd: # recorre\n lista.append(MedicinaUpdate(token='',\n entityKey=i.entityKey,\n #filial_key=user.filial_key.urlsafe(),\n title=i.title,\n description=i.description,\n urlImage=i.urlImage)) # agrega a la lista\n \n lstMessage.data = lista # la manda al messa\n message = lstMessage #regresa\n \n except jwt.DecodeError:\n message = MedicinaList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = MedicinaList(code=-2, data=[]) #token expiro\n return message\n\n###############get the info of one########\n @endpoints.method(TokenKey, MedicinaList, path='medicinas/get', http_method='POST', name='medicina.get')\n def medicina_get(cls, request):\n try: \n token = jwt.decode(request.tokenint, 'secret') #checa token\n medicinaentity = ndb.Key(urlsafe=request.entityKey)\n medicina = Medicina.get_by_id(medicinaentity.id()) #obtiene usuario\n #user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = MedicinaList(code=1) # crea objeto mensaje\n lista.append(MedicinaUpdate(token='',\n entityKey=medicinaentity.get().entityKey,\n #filial_key=teamentity.get().filial_key.urlsafe(), \n title=medicinaentity.get().title, \n #entityKey= medicina.entityKey,\n #filial_key = user.filial_key.urlsafe(),\n #title = medicina.title,\n description=medicina.description,\n urlImage=medicina.urlImage))\n lstMessage.data = lista#ASIGNA a la salida la lista\n message = lstMessage\n except jwt.DecodeError:\n message = MedicinaList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = MedicinaList(code=-2, data=[]) #token expiro\n return message\n\n# delete\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TokenKey, CodeMessage, path='medicinas/delete', http_method='POST', name='medicinas.delete')\n #siempre lleva cls y request\n def medicina_remove(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n medicinaentity = ndb.Key(urlsafe=request.entityKey)#Obtiene el elemento dado el EntitKey\n medicinaentity.delete()#BORRA\n message = CodeMessage(code=1, message='Succesfully deleted')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n##update##\n# update\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(MedicinaUpdate, CodeMessage, path='medicinas/update', http_method='POST', name='medicinas.update')\n#siempre lleva cls y request\n def medicina_update(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de USUARIOS\n #filialkey = ndb.Key(urlsafe=user.filial_key.urlsafe())#convierte el string dado a entityKey\n medicina = Medicina()\n if medicina.medicina_m(request, user.key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=1, message='Sus cambios han sido guardados exitosamente')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return 
message\n\n\n\n\n\n\n\n\n\n###############\n# Viveres\n###############\[email protected](name='viveres_api', version='v1', description='viveres endpoints')\nclass ViveresApi(remote.Service):\n###############get the info of one########\n# insert\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(ViveresInput, CodeMessage, path='viveres/insert', http_method='POST', name='viveres.insert')\n#siempre lleva cls y request\n def viveres_add(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id']) #obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de\n myviveres = Viveres()\n if myviveres.viveres_m(request, user.key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=codigo, message='Viveres added')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n ########################## viveres list ###################\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(Token, ViveresList, path='viveres/list', http_method='POST', name='viveres.list')\n def viveres_list(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret') #checa token\n user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = ViveresList(code=1) # crea objeto mensaje\n lstBd = Viveres.query().fetch() # recupera de base de datos\n for i in lstBd: # recorre\n lista.append(ViveresUpdate(token='',\n entityKey=i.entityKey,\n #filial_key=user.filial_key.urlsafe(),\n code=i.code,\n description=i.description,\n urlImage=i.urlImage)) # agrega a la lista\n \n lstMessage.data = lista # la manda al messa\n message = lstMessage #regresa\n \n except jwt.DecodeError:\n message = ViveresList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = ViveresList(code=-2, data=[]) #token expiro\n return message\n\n###############get the info of one########\n @endpoints.method(TokenKey, ViveresList, path='viveres/get', http_method='POST', name='viveres.get')\n def viveres_get(cls, request):\n try: \n token = jwt.decode(request.tokenint, 'secret') #checa token\n viveresentity = ndb.Key(urlsafe=request.entityKey)\n viveres = Viveres.get_by_id(viveresentity.id()) #obtiene usuario\n #user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = ViveresList(code=1) # crea objeto mensaje\n lista.append(ViveresUpdate(token='', \n entityKey= viveres.entityKey,\n #filial_key = user.filial_key.urlsafe(),\n code = viveres.code,\n description=viveres.description,\n urlImage=viveres.urlImage))\n lstMessage.data = lista#ASIGNA a la salida la lista\n message = lstMessage\n except jwt.DecodeError:\n message = ViveresList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = ViveresList(code=-2, data=[]) #token expiro\n return message\n\n# delete\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TokenKey, CodeMessage, path='viveres/delete', http_method='POST', name='viveres.delete')\n #siempre lleva cls y request\n def viveres_remove(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n 
viveresentity = ndb.Key(urlsafe=request.entityKey)#Obtiene el elemento dado el EntitKey\n viveresentity.delete()#BORRA\n message = CodeMessage(code=1, message='Succesfully deleted')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n##update##\n# update\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(ViveresUpdate, CodeMessage, path='viveres/update', http_method='POST', name='viveres.update')\n#siempre lleva cls y request\n def viveres_update(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de USUARIOS\n #filialkey = ndb.Key(urlsafe=user.filial_key.urlsafe())#convierte el string dado a entityKey\n viveres = Viveres()\n if viveres.viveres_m(request, user.key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=1, message='Sus cambios han sido guardados exitosamente')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n\n\n\n\n\n\n\n\n\n\n###############\n# Usuarios\n###############\[email protected](name='usuarios_api', version='v1', description='usuarios endpoints')\nclass UsuariosApi(remote.Service):\n###############get the info of one########\n @endpoints.method(TokenKey, UserList, path='users/get', http_method='POST', name='users.get')\n def users_get(cls, request):\n try: \n token = jwt.decode(request.tokenint, 'secret') #checa token\n userentity = ndb.Key(urlsafe=request.entityKey)\n user = Usuarios.get_by_id(userentity.id()) #obtiene usuario\n #user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = UserList(code=1) # crea objeto mensaje\n lista.append(UserUpdate(token='', \n entityKey= user.entityKey,\n #filial_key = user.filial_key.urlsafe(),\n email = user.email))\n lstMessage.data = lista#ASIGNA a la salida la lista\n message = lstMessage\n except jwt.DecodeError:\n message = UserList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = UserList(code=-2, data=[]) #token expiro\n return message\n\n\n########################## list###################\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(Token, UserList, path='users/list', http_method='POST', name='users.list')\n def lista_usuarios(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret') #checa token\n user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista\n lstMessage = UserList(code=1) # crea objeto mensaje\n lstBd = Usuarios.query().fetch() # recupera de base de datos\n for i in lstBd: # recorre\n lista.append(UserUpdate(token='',\n entityKey=i.entityKey,\n #filial_key=user.filial_key.urlsafe(),\n email=i.email)) # agrega a la lista\n \n lstMessage.data = lista # la manda al messa\n message = lstMessage #regresa\n \n except jwt.DecodeError:\n message = UserList(code=-1, data=[]) #token invalido\n except jwt.ExpiredSignatureError:\n message = UserList(code=-2, data=[]) #token expiro\n return message\n\n# 
delete\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TokenKey, CodeMessage, path='users/delete', http_method='POST', name='users.delete')\n #siempre lleva cls y request\n def user_remove(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n usersentity = ndb.Key(urlsafe=request.entityKey)#Obtiene el elemento dado el EntitKey\n usersentity.delete()#BORRA\n message = CodeMessage(code=1, message='Succesfully deleted')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n# insert\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(UserInput, CodeMessage, path='users/insert', http_method='POST', name='users.insert')\n def user_add(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])\n if validarEmail(request.email) == False: #checa si el email esta registrado\n #filialkey = ndb.Key(urlsafe=request.filial_key) #convierte el string dado a entityKey\n if user.usuario_m(request, user.filial_key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=codigo, message='Succesfully added')\n else:\n message = CodeMessage(code=-4, message='El email ya ha sido registrado')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n##login##\n\n @endpoints.method(EmailPasswordMessage, TokenMessage, path='users/login', http_method='POST', name='users.login')\n def users_login(cls, request):\n try:\n user = Usuarios.query(Usuarios.email == request.email).fetch() #obtiene el usuario dado el email\n if not user or len(user) == 0: #si no encuentra user saca\n raise NotFoundException()\n user = user[0] \n keye = user.filial_key.urlsafe() # regresa como mensaje el filial key\n if not user.verify_password(request.password): # checa la contrasena\n raise NotFoundException()\n\n token = jwt.encode({'user_id': user.key.id(), 'exp': time.time() + 43200}, 'secret') #crea el token\n message = TokenMessage(token=token, message=keye, code=1) # regresa token\n except NotFoundException:\n message = TokenMessage(token=None, message='Wrong username or password', code=-1)\n return message\n\n##update##\n# update\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(UserUpdate, CodeMessage, path='user/update', http_method='POST', name='user.update')\n#siempre lleva cls y request\n def user_update(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de USUARIOS\n filialkey = ndb.Key(urlsafe=user.filial_key.urlsafe())#convierte el string dado a entityKey\n if user.usuario_m(request, filialkey)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=1, message='Sus cambios han sido guardados exitosamente')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid 
token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n'''\n'''\n\n###########################\n#### Filial\n###########################\n\n\n## Google Cloud Endpoint\[email protected](name='filiales_api', version='v1', description='filiales REST API')\nclass FilialesApi(remote.Service):\n\n\n# get one\n\n @endpoints.method(TokenKey, FilialList, path='filial/get', http_method='POST', name='filial.get')\n#siempre lleva cls y request\n def filial_get(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n #Obtiene el elemento dado el entityKey\n filialentity = ndb.Key(urlsafe=request.entityKey)\n #CREA LA SALIDA de tipo JosueInput y le asigna los valores, es a como se declaro en el messages.py\n #filialentity.get().filial_key.urlsafe() para poder optener el EntityKey\n ##### ejemplo real\n ####### message = FilialList(code=1, data=[FilialUpdate(token='Succesfully get', nombre_filial=filialentity.get().nombre_filial, filial_key=filialentity.get().filial_key.urlsafe(), entityKey=filialentity.get().entityKey)])\n message = FilialList(code=1, data = [FilialUpdate(token='Succesfully get',\n entityKey = filialentity.get().entityKey,\n codigo_filial=filialentity.get().codigo_filial, \n nombre_filial = filialentity.get().nombre_filial)])\n\n except jwt.DecodeError:\n message = FilialList(code=-1, data=[])\n except jwt.ExpiredSignatureError:\n message = FilialList(code=-2, data=[])\n return message\n\n\n\n\n @endpoints.method(TokenKey, CodeMessage, path='filial/delete', http_method='POST', name='filial.delete')\n#siempre lleva cls y request\n def filial_remove(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n filialentity = ndb.Key(urlsafe=request.entityKey)#Obtiene el elemento dado el EntitKey\n filialentity.delete()#BORRA\n message = CodeMessage(code=1, message='Succesfully deleted')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n# insert\n @endpoints.method(FilialInput, CodeMessage, path='filial/insert', http_method='POST', name='filial.insert')\n#siempre lleva cls y request\n def filial_add(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario models.py \n myfilial = Filial()\n if myfilial.filial_m(request)==0: \n codigo=1\n else:\n\t\tcodigo=-3\n \t #la funcion josue_m puede actualizar e insertar\n\t #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=codigo, message='Succesfully added')\n #else:\n\t # message = CodeMessage(code=-4, message='Succesfully added')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n\n# update\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(FilialUpdate, CodeMessage, path='filial/update', http_method='POST', name='filial.update')\n#siempre lleva cls y request\n def filial_update(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN \n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de USUARIOS\n #filialkey = ndb.Key(urlsafe=request.filial_key)#convierte el string dado a entityKey\n 
myfilial = Filial()\n if myfilial.filial_m(request)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=1, message='Sus cambios han sido guardados exitosamente')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\n\n# list\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(Token, FilialList, path='filial/list', http_method='POST', name='filial.list')\n#siempre lleva cls y request\n def filial_list(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n #if user.importante==1 or user.importante==2:\n lista = [] #crea lista para guardar contenido de la BD\n lstMessage = FilialList(code=1) #CREA el mensaje de salida\n lstBdFilial = Filial.query().fetch() #obtiene de la base de datos\n for i in lstBdFilial: #recorre la base de datos\n #inserta a la lista creada con los elementos que se necesiten de la base de datos\n #i.filial_key.urlsafe() obtiene el entityKey\n\t #lista.append(ClientesUpdate(token='', nombre=i.nombre, status=i.status, filial_key=i.filial_key.urlsafe(), entityKey=i.entityKey))\n lista.append(FilialUpdate(token='', \n entityKey = i.entityKey,\n codigo_filial=i.codigo_filial, \n nombre_filial = i.nombre_filial))\n \n lstMessage.data = lista #ASIGNA a la salida la lista\n message = lstMessage\n #else:\n # message = FilialList(code=-3, data=[])\n except jwt.DecodeError:\n message = FilialList(code=-1, data=[])\n except jwt.ExpiredSignatureError:\n message = FilialList(code=-2, data=[])\n return message\n\n\n###########################\n#### Tweets\n###########################\n\[email protected](name='tweet_api', version='v1', description='tweet REST API')\nclass TweetsApi(remote.Service):\n# get one\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TokenKey, TweetList, path='tweet/get', http_method='POST', name='tweet.get')\n#siempre lleva cls y request\n def tweet_get(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n #Obtiene el elemento dado el entityKey\n tweetentity = ndb.Key(urlsafe=request.entityKey)\n #CREA LA SALIDA de tipo JosueInput y le asigna los valores, es a como se declaro en el messages.py\n #josuentity.get().filial_key.urlsafe() para poder optener el EntityKey\n message = TweetList(code=1, data=[TweetUpdate(token='Succesfully get',\n entityKey=tweetentity.get().entityKey,\n #filial_key=teamentity.get().filial_key.urlsafe(), \n title=tweetentity.get().title, \n description=tweetentity.get().description, \n urlImage=tweetentity.get().urlImage)])\n except jwt.DecodeError:\n message = TweetList(code=-1, data=[])\n except jwt.ExpiredSignatureError:\n message = TweetList(code=-2, data=[])\n return message\n\n\n# delete\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TokenKey, CodeMessage, path='tweet/delete', http_method='POST', name='tweet.delete')\n#siempre lleva cls y request\n def tweet_remove(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n tweetentity = ndb.Key(urlsafe=request.entityKey)#Obtiene el elemento dado el EntitKey\n tweetentity.delete()#BORRA\n message = CodeMessage(code=0, message='tweet deleted')\n 
except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n# list\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(Token, TweetList, path='tweet/list', http_method='POST', name='tweet.list')\n#siempre lleva cls y request\n def tweet_list(cls, request):\n try:\n token = jwt.decode(request.tokenint, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id']) #obtiene usuario dado el token\n lista = [] #crea lista para guardar contenido de la BD\n lstMessage = TweetList(code=1) #CREA el mensaje de salida\n lstBd = Tweet.query().fetch() #obtiene de la base de datos\n for i in lstBd: #recorre la base de datos\n #inserta a la lista creada con los elementos que se necesiten de la base de datos\n #i.filial_key.urlsafe() obtiene el entityKey\n\t \n lista.append(TweetUpdate(token='', \n entityKey=i.entityKey, \n #filial_key=i.filial_key.urlsafe(),\n title=i.title, \n description=i.description, \n urlImage=i.urlImage))\n lstMessage.data = lista #ASIGNA a la salida la lista\n message = lstMessage\n except jwt.DecodeError:\n message = TweetList(code=-1, data=[])\n except jwt.ExpiredSignatureError:\n message = TweetList(code=-2, data=[])\n return message\n\n# insert\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TweetInput, CodeMessage, path='tweet/insert', http_method='POST', name='tweet.insert')\n#siempre lleva cls y request\n def tweet_add(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id']) #obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de\n mytweet = Tweet()\n if mytweet.tweet_m(request, user.filial_key)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=codigo, message='Tweet added')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n# update\n# ENTRADA SALIDA RUTA siempre es POST NOMBRE\n @endpoints.method(TweetUpdate, CodeMessage, path='tweet/update', http_method='POST', name='tweet.update')\n#siempre lleva cls y request\n def tweet_update(cls, request):\n try:\n token = jwt.decode(request.token, 'secret')#CHECA EL TOKEN\n user = Usuarios.get_by_id(token['user_id'])#obtiene el usuario para poder acceder a los metodos declarados en models.py en la seccion de USUARIOS\n filialkey = ndb.Key(urlsafe=user.filial_key.urlsafe())#convierte el string dado a entityKey\n mytweet = Tweet()\n if mytweet.tweet_m(request, filialkey)==0:#llama a la funcion declarada en models.py en la seccion de USUARIOS\n codigo=1\n else:\n codigo=-3\n #la funcion josue_m puede actualizar e insertar\n #depende de la ENTRADA de este endpoint method\n message = CodeMessage(code=1, message='tweet updated')\n except jwt.DecodeError:\n message = CodeMessage(code=-2, message='Invalid token')\n except jwt.ExpiredSignatureError:\n message = CodeMessage(code=-1, message='Token expired')\n return message\n\n\napplication = endpoints.api_server([UsuariosApi, FilialesApi, TweetsApi, ProductsApi, MedicinasApi, ViveresApi], restricted=False)\n\n" }, { "alpha_fraction": 0.6585525274276733, "alphanum_fraction": 
0.6611664891242981, "avg_line_length": 26.949771881103516, "blob_id": "f0c196ef20755cee6e3d2305692b8457fd6e81df", "content_id": "2cb1d9425b6192f27822c536a2201d788a1bf27f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6121, "license_type": "no_license", "max_line_length": 89, "num_lines": 219, "path": "/fundacion_(web)/public_rest_api.py", "repo_name": "maverick1313/DesarrolloWebEvidencia", "src_encoding": "UTF-8", "text": "import webapp2\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import app_identity\nfrom google.appengine.api import images\nfrom google.appengine.ext import blobstore\nimport cloudstorage\nimport mimetypes\nimport json\nimport os\nimport jinja2\n\nfrom models import Filial\nfrom models import Tweet\nfrom models import Medicina\n\njinja_env = jinja2.Environment(\n    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\n\nclass DemoClass(object):\n    pass\n\ndef MyClass(obj):\n    return obj.__dict__\n\n\nclass GetTweetsHandler(webapp2.RequestHandler):\n\n    def get(self):\n        self.response.headers.add_header('Access-Control-Allow-Origin', '*')\n        self.response.headers['Content-Type'] = 'application/json'\n\n        id_filial = self.request.get('filial')\n        objemp = Filial.query(Filial.codigo_filial == id_filial).get()\n        strKey = objemp.key  # solo se obtiene el key de la filial\n        myTweets = Tweet.query(Tweet.filial_key == strKey)  # filtro por filial key\n\n        myList = []\n        for i in myTweets:\n            myObj = DemoClass()\n            myObj.title = i.title\n            myObj.description = i.description\n            myObj.urlImage = i.urlImage\n            myList.append(myObj)\n\n        json_string = json.dumps(myList, default=MyClass)\n        self.response.write(json_string)\n\n\n\n###########################################################################\n\nclass GetMedicinasHandler(webapp2.RequestHandler):\n\n    def get(self):\n        self.response.headers.add_header('Access-Control-Allow-Origin', '*')\n        self.response.headers['Content-Type'] = 'application/json'\n\n        id_filial = self.request.get('filial')\n        objemp = Filial.query(Filial.codigo_filial == id_filial).get()\n        strKey = objemp.key  # solo se obtiene el key de la filial\n        myMedicinas = Medicina.query(Medicina.filial_key == strKey)  # filtro por filial key\n\n        myList = []\n        for i in myMedicinas:\n            myObj = DemoClass()\n            myObj.title = i.title\n            myObj.description = i.description\n            myObj.urlImage = i.urlImage\n            myList.append(myObj)\n\n        json_string = json.dumps(myList, default=MyClass)\n        self.response.write(json_string)\n\n###########################################################################\n\n\nclass UpHandler(webapp2.RequestHandler):\n    def _get_urls_for(self, file_name):\n\n        bucket_name = app_identity.get_default_gcs_bucket_name()\n        path = os.path.join('/', bucket_name, file_name)\n        real_path = '/gs' + path\n        key = blobstore.create_gs_key(real_path)\n        try:\n            url = images.get_serving_url(key, size=0)\n        except (images.TransformationError, images.NotImageError):\n            url = \"http://storage.googleapis.com{}\".format(path)\n\n        return url\n\n\n    def post(self):\n        self.response.headers.add_header('Access-Control-Allow-Origin', '*')\n        self.response.headers['Content-Type'] = 'application/json'\n\n        bucket_name = app_identity.get_default_gcs_bucket_name()\n        uploaded_file = self.request.POST.get('uploaded_file')\n        file_name = getattr(uploaded_file, 'filename', None)\n        file_content = getattr(uploaded_file, 'file', None)\n        real_path = ''\n\n        if file_name and file_content:\n            content_t = 
mimetypes.guess_type(file_name)[0]\n real_path = os.path.join('/', bucket_name, file_name)\n\n with cloudstorage.open(real_path, 'w', content_type=content_t,\n options={'x-goog-acl': 'public-read'}) as f:\n f.write(file_content.read())\n\n key = self._get_urls_for(file_name)\n self.response.write(key)\n\n\nclass LoginHandler(webapp2.RequestHandler):\n\n def get(self):\n\n template_context = {}\n self.response.out.write(\n self._render_template('login.html', template_context))\n\n def _render_template(self, template_name, context=None):\n if context is None:\n context = {}\n\n template = jinja_env.get_template(template_name)\n return template.render(context)\n\n\nclass TweetHandler(webapp2.RequestHandler):\n\n def get(self):\n\n template_context = {}\n self.response.out.write(\n self._render_template('tweet.html', template_context))\n\n def _render_template(self, template_name, context=None):\n if context is None:\n context = {}\n\n template = jinja_env.get_template(template_name)\n return template.render(context)\n\n\nclass MenuHandler(webapp2.RequestHandler):\n\n def get(self):\n\n template_context = {}\n self.response.out.write(\n self._render_template('menu.html', template_context))\n\n def _render_template(self, template_name, context=None):\n if context is None:\n context = {}\n\n template = jinja_env.get_template(template_name)\n return template.render(context)\n\nclass MedicinasHandler(webapp2.RequestHandler):\n\n def get(self):\n\n template_context = {}\n self.response.out.write(\n self._render_template('medicinas.html', template_context))\n\n def _render_template(self, template_name, context=None):\n if context is None:\n context = {}\n\n template = jinja_env.get_template(template_name)\n return template.render(context)\n\nclass ViveresHandler(webapp2.RequestHandler):\n\n def get(self):\n\n template_context = {}\n self.response.out.write(\n self._render_template('viveres.html', template_context))\n\n def _render_template(self, template_name, context=None):\n if context is None:\n context = {}\n\n template = jinja_env.get_template(template_name)\n return template.render(context)\n\nclass MainHandler(webapp2.RequestHandler):\n\n def get(self):\n\n template_context = {}\n self.response.out.write(\n self._render_template('index.html', template_context))\n\n\n def _render_template(self, template_name, context=None):\n if context is None:\n context = {}\n\n template = jinja_env.get_template(template_name)\n return template.render(context)\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler),\n ('/login', LoginHandler),\n ('/tweets', TweetHandler),\n ('/menu', MenuHandler),\n ('/medicinas', MedicinasHandler),\n ('/viveres', ViveresHandler), \n ('/up', UpHandler),\n ('/getmedicinas', GetMedicinasHandler),\n ('/gettweets', GetTweetsHandler),\n], debug = True)\n" }, { "alpha_fraction": 0.6535577774047852, "alphanum_fraction": 0.6581484079360962, "avg_line_length": 34.906593322753906, "blob_id": "2518bcc6616d7782d4d029d56db4c2fd140e8faf", "content_id": "03d1f00cf71658778d1d777a8f17ff3a96159962", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6535, "license_type": "no_license", "max_line_length": 115, "num_lines": 182, "path": "/fundacion_(web)/models.py", "repo_name": "maverick1313/DesarrolloWebEvidencia", "src_encoding": "UTF-8", "text": "import base64\nimport Crypto\nfrom Crypto.Hash import SHA256\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import blobstore\nfrom protorpc import remote\nfrom 
endpoints_proto_datastore.ndb import EndpointsModel\nimport endpoints\nfrom google.appengine.api import mail\nfrom google.appengine.ext.webapp import blobstore_handlers\n\nclass CustomBaseModel(EndpointsModel):\n def populate(self, data):\n super(self.__class__, self).__init__()\n for attr in self._message_fields_schema:\n if hasattr(data, attr):\n setattr(self, attr, getattr(data, attr))\n\n## filial\nclass Filial(CustomBaseModel):\n _message_fields_schema = ('entityKey', 'codigo_filial', 'nombre_filial')\n codigo_filial = ndb.StringProperty()\n nombre_filial = ndb.StringProperty()\n \n ###filial####\n def filial_m(self, data):\n filial = Filial()#Crea una variable de tipo Base de datos\n filial.populate(data)#Llena la variables con los datos dados por el request en main.py\n #filial.filial_key=filialkey #inserta el entityKey de la filial que es un parametro que se manda en main.py\n filial.put()#inserta o hace un update depende del main.py\n return 0\n\n\n\n#####USUARIOS#########\n\nclass Usuarios(CustomBaseModel):\n _message_fields_schema = ('entityKey', 'email', 'password', 'salt')\n\n filial_key = ndb.KeyProperty(kind=Filial)\n email = ndb.StringProperty()\n password = ndb.StringProperty()\n salt = ndb.StringProperty(indexed=False)\n \n \n def hash_password(self):\n \"\"\" Create a cryptographyc random secure salt and hash the password\n using the salt created and store both in the database, the password\n and the salt \"\"\"\n # Note: It is needed to encode in base64 the salt, otherwise it will\n # cause an exception trying to store non utf-8 characteres\n self.salt = base64.urlsafe_b64encode(\n Crypto.Random.get_random_bytes(16))\n hash_helper = SHA256.new()\n hash_helper.update(self.password + self.salt)\n self.password = hash_helper.hexdigest()\n\n def verify_password(self, password):\n \"\"\" Verify if the password is correct \"\"\"\n hash_helper = SHA256.new()\n hash_helper.update(password + self.salt)\n return hash_helper.hexdigest() == self.password\n\n ###Usuarios####\n def usuario_m(self, data, filialkey):\n user = Usuarios()#Crea una variable de tipo Base de datos\n user.populate(data)#Llena la variables con los datos dados por el request en main.py\n user.filial_key=filialkey\n user.status=1\n user.hash_password()#encripta la contrasena\n user.put()#inserta o hace un update depende del main.py\n return 0\n\n\n######### Tweets #########\n\nclass Tweet(CustomBaseModel):\n _message_fields_schema = ('entityKey', 'title', 'description', 'urlImage')\n filial_key = ndb.KeyProperty(kind=Filial)\n title = ndb.StringProperty()\n description = ndb.StringProperty()\n urlImage = ndb.StringProperty()\n \n ### Tweet ####\n def tweet_m(self, data, filialkey):\n tweet = Tweet()#Crea una variable de tipo Tweet\n tweet.populate(data)#Llena la variables con los datos dados por el request en main.py\n tweet.filial_key=filialkey#inserta el entityKey de la filial que es un parametro que se manda en main.py\n tweet.put()#inserta o hace un update depende del main.py\n return 0\n\n######### Product #########\n\nclass Product(CustomBaseModel):\n _message_fields_schema = ('entityKey', 'code', 'description', 'urlImage')\n user_key = ndb.KeyProperty(kind=Usuarios)\n code = ndb.StringProperty()\n description = ndb.StringProperty()\n urlImage = ndb.StringProperty()\n \n ### Add product ####\n def product_m(self, data, userkey):\n product = Product()#Crea una variable de tipo Tweet\n product.populate(data)#Llena la variables con los datos dados por el request en main.py\n product.user_key=userkey#inserta 
el entityKey del usuario que es un parametro que se manda en main.py\n        product.put()#inserta o hace un update depende del main.py\n        return 0\n\n######### Medicina #########\n\nclass Medicina(CustomBaseModel):\n    _message_fields_schema = ('entityKey', 'title', 'description', 'urlImage')\n    #user_key = ndb.KeyProperty(kind=Usuarios)\n    filial_key = ndb.KeyProperty(kind=Filial)\n    title = ndb.StringProperty()\n    description = ndb.StringProperty()\n    urlImage = ndb.StringProperty()\n\n    ### Add medicina ####\n    def medicina_m(self, data, filialkey):\n        medicina = Medicina()#Crea una variable de tipo Medicina\n        medicina.populate(data)#Llena la variables con los datos dados por el request en main.py\n        medicina.filial_key=filialkey#inserta el entityKey de la filial que es un parametro que se manda en main.py\n        medicina.put()#inserta o hace un update depende del main.py\n        return 0\n\n######### Viveres #########\n\nclass Viveres(CustomBaseModel):\n    _message_fields_schema = ('entityKey', 'code', 'description', 'urlImage')\n    #user_key = ndb.KeyProperty(kind=Usuarios)\n    filial_key = ndb.KeyProperty(kind=Filial)\n    code = ndb.StringProperty()\n    description = ndb.StringProperty()\n    urlImage = ndb.StringProperty()\n\n    ### Add viveres ####\n    def viveres_m(self, data, filialkey):\n        viveres = Viveres()#Crea una variable de tipo Viveres\n        viveres.populate(data)#Llena la variables con los datos dados por el request en main.py\n        viveres.filial_key=filialkey#inserta el entityKey de la filial que es un parametro que se manda en main.py\n        viveres.put()#inserta o hace un update depende del main.py\n        return 0\n\n#### create demo\n\ndef validarEmail(email):\n    emailv = Usuarios.query(Usuarios.email == email)\n    if not emailv.get():\n        return False\n    else:\n        return True\n\n#### create root filial\n\nif validarEmail(\"[email protected]\") == False: # mismo mail\n    filialAdmin = Filial(\n        codigo_filial = 'puebla',\n        nombre_filial=\"puebla srl de cv\",\n    )\n    filialAdmin.put()\n\n\n#### create root user \n\n    keyadmincol = ndb.Key(urlsafe=filialAdmin.entityKey)\n    admin = Usuarios(\n        filial_key = keyadmincol,\n        email=\"[email protected]\", #mismo mail\n        password=\"maverick\",\n\n    )\n    admin2 = Usuarios(\n        filial_key = keyadmincol,\n        email=\"[email protected]\", #mismo mail\n        password=\"maverick2\",\n\n    )\n    admin.hash_password()\n    admin.put()\n    admin2.hash_password()\n    admin2.put()\n" } ]
5
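The fundacion endpoints above repeat the same jwt.decode / jwt.DecodeError / jwt.ExpiredSignatureError boilerplate in every method (sometimes reading request.token, sometimes request.tokenint). Below is a minimal sketch of how that pattern could be factored into a decorator, assuming PyJWT 1.x (where jwt.decode(token, secret) is a valid call) and assuming the repo's CodeMessage type is importable from a messages module; that module is not part of the record, and the name with_jwt is hypothetical:

    import functools
    import jwt
    from messages import CodeMessage  # assumed location of the protorpc message types

    def with_jwt(token_field='token', secret='secret'):
        # Hypothetical helper: wraps an endpoint method with the decode/expiry
        # handling that each method above writes out inline.
        def decorate(method):
            @functools.wraps(method)
            def wrapper(cls, request):
                try:
                    payload = jwt.decode(getattr(request, token_field), secret)
                    return method(cls, request, payload)
                except jwt.ExpiredSignatureError:
                    return CodeMessage(code=-1, message='Token expired')
                except jwt.DecodeError:
                    return CodeMessage(code=-2, message='Invalid token')
            return wrapper
        return decorate

With this in place each endpoint body shrinks to its datastore logic, and the update methods can return CodeMessage(code=codigo, ...) instead of discarding the codigo they compute and hard-coding code=1.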
RLC-GTL/AC_SRU_ServerManager
https://github.com/RLC-GTL/AC_SRU_ServerManager
33096475d5f73810cba7f2b65ba1fa03c16307bc
bfe38c3b8f5d88e72a5387b745f97edfcf29481a
58220f4311df25c94fc50bee2a955a336164933e
refs/heads/master
2020-07-03T16:12:19.261339
2017-01-15T01:01:47
2017-01-15T01:01:47
74,248,851
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.59375, "alphanum_fraction": 0.6011029481887817, "avg_line_length": 20.760000228881836, "blob_id": "7bdca86b0593c2d1ab4f327931cf0388af7e7a0a", "content_id": "794e5aff1d40d8ec3eb145e188bfaa2d51fd8886", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 544, "license_type": "no_license", "max_line_length": 72, "num_lines": 25, "path": "/ServerControl_Template/stop.sh", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#\n# stops running instances started with start.sh. See start.sh for usage.\n\nIDX=\"$1\"\nM=\"$2\"\n\nif test -z \"$1\"; then\n echo \"Supply server index as argument\"\nelse\n if test -z \"$M\" -o \"$M\" = \"AC\"; then\n echo \"Stopping AC server $IDX\"\n screen -X -S acserver-$IDX quit\n fi\n if test -z \"$M\" -o \"$M\" = \"ST\"; then\n echo \"Stopping stracker $IDX\"\n screen -X -S stracker-$IDX quit\n fi\n if test -z \"$M\" -o \"$M\" = \"MR\"; then\n echo \"Stopping minorating $IDX\"\n screen -X -S minorating-$IDX quit\n fi\n sleep 2\n screen -ls\nfi\n" }, { "alpha_fraction": 0.4845474660396576, "alphanum_fraction": 0.49061810970306396, "avg_line_length": 19.0930233001709, "blob_id": "5267369fc1ee8265aa178eca945efe2a8badcb17", "content_id": "b67e043ef7c054d2458a0caa6ab687e80ed4bddf", "detected_licenses": [ "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 65, "num_lines": 86, "path": "/ServerControl_Template/stracker/http_templates/tmpl_helpers.py", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom bottle import SimpleTemplate\r\n\r\nclass MyTemplate(SimpleTemplate):\r\n def __init__(self, t, **kw):\r\n self.kw = kw\r\n SimpleTemplate.__init__(self,t)\r\n\r\n def render(self, **kw):\r\n kw.update(self.kw)\r\n return SimpleTemplate.render(self, kw)\r\n\r\ncar_info = {}\r\n\r\ndef set_car_info(new_car_info):\r\n for c in new_car_info:\r\n car_info[c] = new_car_info[c]\r\n\r\ncar_tmpl = MyTemplate(\"\"\"\\\r\n% from stracker_lib.logger import *\r\n% try:\r\n% tooltip\r\n% except:\r\n% tooltip = False\r\n% end\r\n% try:\r\n% if uicar in [None, car]:\r\n% raise RuntimeError\r\n% end\r\n% except:\r\n% uicar = car_info.get(car, {}).get('uiname', car)\r\n% end\r\n%\r\n% def longestSubString(s1, s2):\r\n% i = 0\r\n% while i < len(s1) and i < len(s2) and s1[i] == s2[i]:\r\n% i+=1\r\n% end\r\n% return s1[:i]\r\n% end\r\n%\r\n% def unbrand(car, uicar):\r\n% if uicar is None:\r\n% return car\r\n% end\r\n% if car in car_info:\r\n% brand = car_info[car].get('brand','')\r\n% if not brand is None:\r\n% ls = longestSubString(uicar.lower(), brand.lower())\r\n% else:\r\n% ls = \"\"\r\n% end\r\n% if len(ls) < 3:\r\n% ls = \"\"\r\n% end\r\n% uicar = uicar[len(ls):].strip()\r\n% else:\r\n% pass\r\n% end\r\n% return uicar\r\n% end\r\n%\r\n% def carbadge(car):\r\n% return car_info.get(car, {}).get('brand','')\r\n% end\r\n%\r\n% needsText=True\r\n% brand = carbadge(car)\r\n% if brand:\r\n<img src=\"carbadge?car={{!car}}\"\r\n% if tooltip:\r\n% needsText=False\r\n title=\"{{uicar}}\"\r\n% else:\r\n title=\"{{brand}}\"\r\n% end\r\n class=\"aids\">\r\n% end\r\n% if needsText:\r\n {{unbrand(car,uicar)}}\r\n% end\r\n% if carbadge(car):\r\n</img>\r\n% end\r\n\"\"\", **{'car_info':car_info})" }, { "alpha_fraction": 0.6494464874267578, "alphanum_fraction": 0.678966760635376, "avg_line_length": 
21.58333396911621, "blob_id": "39b18e296a29f078a21493962d3fad9f033c2557", "content_id": "dbd30162484631e303b5e21968ffbeaa6285e01b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 271, "license_type": "no_license", "max_line_length": 102, "num_lines": 12, "path": "/ServerControl_Template/update.sh", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "#!/bin/sh\n#\n# script to update AC server\n# usage:\n# ./start.sh <user> <password>\n# <user> Steam user name.\n# <password> Steam user name's password.\n\nuser=\"$1\"\npw=\"$2\"\n\n~/Steam/steamcmd.sh +login \"$user\" \"$pw\" +@sSteamCmdForcePlatformType windows +app_update 302550 +quit\n" }, { "alpha_fraction": 0.44863998889923096, "alphanum_fraction": 0.4553599953651428, "avg_line_length": 43.92647171020508, "blob_id": "2bb2a2f243238c23aa284c8cc4394e9408be854e", "content_id": "38470370785b930ef2c7899d63814c0ae43c0930", "detected_licenses": [ "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3125, "license_type": "no_license", "max_line_length": 194, "num_lines": 68, "path": "/ServerControl_Template/stracker/http_templates/tmpl_chat.py", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom bottle import SimpleTemplate\r\n\r\n# ----------------------------------------------\r\n# statistics template\r\n# ----------------------------------------------\r\n\r\nchatlogTemplate = SimpleTemplate(\"\"\"\r\n<div class=\"container\">\r\n <div class=\"row page-header\">\r\n <div class=\"col-md-6\"><img src=\"/img/banner.png\" title=\"Logo Track\" class=\"ACimg\"></div>\r\n <div class=\"col-md-6\">\r\n <form class=\"form-horizontal collapse-group\" role=\"form\">\r\n <div class=\"form-group\">\r\n% if len(servers) > 1:\r\n <label for=\"servers\" class=\"col-md-2 control-label\">Server</label>\r\n <div class=\"col-md-10\">\r\n <select id=\"server\" name=\"server\" class=\"form-control multiselect\">\r\n% for s in servers:\r\n <option {{!\"selected\" if s == server else \"\"}} value=\"{{s}}\">{{s}}</option>\r\n% end\r\n </select>\r\n </div>\r\n% end\r\n <label for=\"date_from\" class=\"col-md-2 control-label\">From</label>\r\n <div class=\"col-md-3\">\r\n <input name=\"date_from\" id=\"date_from\" class=\"datepicker form-control\" data-date-format=\"yyyy-mm-dd\" value=\"{{date_from if not date_from is None else ''}}\" />\r\n </div>\r\n <label for=\"date_to\" class=\"col-md-2 control-label\">To</label>\r\n <div class=\"col-md-3\">\r\n <input name=\"date_to\" id=\"date_to\" class=\"datepicker form-control\" data-date-format=\"yyyy-mm-dd\" value=\"{{date_to if not date_to is None else ''}}\" />\r\n </div>\r\n </div>\r\n <div class=\"form-group\">\r\n <div class=\"col-md-4 col-md-offset-2\">\r\n <button class=\"form-control btn btn-sm btn-primary\">\r\n Submit\r\n </button>\r\n </div>\r\n <div class=\"col-md-4 col-md-offset-2\">\r\n <button class=\"form-control btn btn-sm btn-primary\" onClick=\"window.history.back()\">\r\n Back\r\n </button>\r\n </div>\r\n </div>\r\n </form>\r\n </div>\r\n </div>\r\n <div class=\"row\">\r\n <div class=\"col-md-12\">\r\n <table class=\"table table-responsive table-hover table-striped table-condensed\">\r\n <thead>\r\n <tr><th class=\"col-sm-2 text-right\">Timestamp</th><th class=\"col-sm-2 text-center\">Name</th><th class=\"col-sm-8\">Chat message</th></tr>\r\n </thead>\r\n <tbody\">\r\n% from 
ptracker_lib.helpers import *\r\n% for msg in messages:\r\n% ts = unixtime2datetime(msg['timestamp'])\r\n% ts = format_datetime(ts)\r\n <tr><td class=\"text-right\">{{ts}}</td><td class=\"text-right\"><a href=\"playerdetails?pid={{\"%d\" % msg['playerid']}}#\">{{msg['name']}}:</a></td><td>{{msg['content']}}</td></tr>\r\n% end\r\n </tbody>\r\n </table>\r\n </div>\r\n </div>\r\n</div>\r\n\"\"\")\r\n\r\n" }, { "alpha_fraction": 0.4220319092273712, "alphanum_fraction": 0.43295925855636597, "avg_line_length": 33.62105178833008, "blob_id": "2f37f8cf99a1f6ed6179e2d6b35bbab43e368627", "content_id": "db8b5abe451bacd5529081c421778147846565c9", "detected_licenses": [ "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3386, "license_type": "no_license", "max_line_length": 108, "num_lines": 95, "path": "/ServerControl_Template/stracker/http_templates/tmpl_log.py", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom bottle import SimpleTemplate\r\n\r\n# ----------------------------------------------\r\n# statistics template\r\n# ----------------------------------------------\r\n\r\nlogTemplate = SimpleTemplate(\"\"\"\r\n<script>\r\n$(function() {\r\n\r\n function update() {\r\n%if not server is None:\r\n% serverStr = \"&server=\"+server\r\n%else:\r\n% serverStr = \"\"\r\n%end\r\n%serverStr += \"&level=\"+level\r\n $.getJSON('log_stream?key={{!key}}&limit={{!str(limit) + serverStr}}', {}, function(data) {\r\n if (data.state != 'done') {\r\n if(data.content != '') {\r\n $('#log_contents').append(data.content);\r\n window.scrollTo(0,document.body.scrollHeight);\r\n }\r\n setTimeout(update, 0);\r\n }\r\n });\r\n }\r\n\r\n update();\r\n});\r\n</script>\r\n<div class=\"container\">\r\n <div class=\"row page-header\">\r\n <div class=\"col-md-6\"><img src=\"/img/banner.png\" title=\"Logo Track\" class=\"ACimg\"></div>\r\n <div class=\"col-md-6\">\r\n <form class=\"form-horizontal collapse-group\" role=\"form\">\r\n <div class=\"form-group\">\r\n% if len(servers) > 1:\r\n <label for=\"servers\" class=\"col-md-2 control-label\">Server</label>\r\n <div class=\"col-md-10\">\r\n <select id=\"server\" name=\"server\" class=\"form-control multiselect\">\r\n% for s in servers:\r\n <option {{!\"selected\" if s == server else \"\"}} value=\"{{s}}\">{{s}}</option>\r\n% end\r\n </select>\r\n </div>\r\n% end\r\n <label for=\"limit\" class=\"col-md-2 control-label\">Lines</label>\r\n <div class=\"col-md-10\">\r\n <select id=\"limit\" name=\"limit\" class=\"form-control multiselect\">\r\n% limits = set([10,20,50,100,200,1000] + [limit])\r\n% for l in sorted(list(limits)):\r\n <option {{!\"selected\" if l == limit else \"\"}} value=\"{{l}}\">{{l}}</option>\r\n% end\r\n </select>\r\n </div>\r\n\r\n <label for=\"level\" class=\"col-md-2 control-label\">Level</label>\r\n <div class=\"col-md-10\">\r\n <select id=\"level\" name=\"level\" class=\"form-control multiselect\">\r\n% levels = [\"error\", \"warning\", \"info\", \"debug\", \"unclassified\"]\r\n% for l in levels:\r\n <option {{!\"selected\" if l == level else \"\"}} value=\"{{l}}\">{{l}}</option>\r\n% end\r\n </select>\r\n </div>\r\n\r\n </div>\r\n <div class=\"form-group\">\r\n <div class=\"col-md-4 col-md-offset-2\">\r\n <button class=\"form-control btn btn-sm btn-primary\">\r\n Submit\r\n </button>\r\n </div>\r\n <div class=\"col-md-4 col-md-offset-2\">\r\n <button class=\"form-control btn btn-sm btn-primary\" 
onClick=\"window.history.back()\">\r\n                        Back\r\n                    </button>\r\n                </div>\r\n            </div>\r\n        </form>\r\n        </div>\r\n    </div>\r\n    <div class=\"row\">\r\n        <div class=\"col-md-12\">\r\n            <table class=\"table table-condensed\">\r\n                <tbody id=\"log_contents\">\r\n                </tbody>\r\n            </table>\r\n        </div>\r\n    </div>\r\n</div>\r\n\"\"\")\r\n\r\n" }, { "alpha_fraction": 0.530990719795227, "alphanum_fraction": 0.5470961332321167, "avg_line_length": 29.582090377807617, "blob_id": "6c85ae8ee859123a3dee08e858e8e52839b462a3", "content_id": "6a85d7d150867761bc525155c45eb5c67aa657d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2049, "license_type": "no_license", "max_line_length": 159, "num_lines": 67, "path": "/ServerControl_Template/start.sh", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "#!/bin/bash\n#\n# script to start AC server, minorating and stracker\n# usage:\n# ./start.sh <idx> [<module>]\n# <idx> the server instance, e.g. 2017_SRU_ACL\n# <module> optionally specifies the module to be started (if omitted all modules are started). Must be one of AC, MR or ST.\n\nIDX=\"$1\"\nM=\"$2\"\n\nCFG=\"cfg\"\n\nif test -z \"$1\" -o '!' -d \"$CFG/$IDX\"; then\n  echo \"Index of server missing or invalid\"\nelse\n  ./stop.sh \"$1\" \"$2\"\n\n  if test -z \"$M\" -o \"$M\" = \"ST\"; then\n    # stracker\n    echo \"Starting stracker $IDX\"\n    cd stracker\n    screen -S stracker-$IDX -d -m ./stracker_linux_x86/stracker --stracker_ini=../\"$CFG/$IDX\"/stracker.ini\n    cd ..\n  fi\n  if test -z \"$M\" -o \"$M\" = \"MR\"; then\n    # minorating\n    echo \"Starting minorating $IDX\"\n    rm \"$CFG/$IDX\"/*.exe \"$CFG/$IDX\"/*.dll \"$CFG/$IDX\"/*.pdb\n    ln minorating/*.exe minorating/*.pdb minorating/*.dll \"$CFG/$IDX\"\n    cd \"$CFG/$IDX\"\n    test -f screenlog.0 && rm screenlog.0\n    mv ../../logs/minorating$IDX.log screenlog.0\n    echo \"-------------------------------------\" >> screenlog.0\n    echo \"RESTART\" >> screenlog.0\n    echo \"-------------------------------------\" >> screenlog.0\n    screen -S minorating-$IDX -d -m -L mono MinoRatingPlugin.exe\n    sleep 2\n    ln screenlog.0 ../../logs/minorating$IDX.log\n    cd ../..\n  fi\n\n  if test -z \"$M\" -o \"$M\" = \"AC\"; then\n    # ac server\n    echo \"Starting AC server $IDX\"\n    cd acserver\n    test -d wd$IDX || mkdir wd$IDX\n    cd wd$IDX\n    ln -sf ../content .\n    ln -sf ../system .\n    test -d results || mkdir results\n    chmod 0755 results\n    test -f screenlog.0 && rm screenlog.0\n    mv ../../logs/acserver$IDX.log screenlog.0\n    echo \"-------------------------------------\" >> screenlog.0\n    echo \"RESTART\" >> screenlog.0\n    echo \"-------------------------------------\" >> screenlog.0\n    ionice -n 0 screen -S acserver-$IDX -d -m -L sh -c \"../acServer -c=../../\"$CFG/$IDX\"/server_cfg.ini -e=../../\"$CFG/$IDX\"/entry_list.ini | ts \\\\\\'{%F %T}:'\"\n    sleep 2\n    ln screenlog.0 ../../logs/acserver$IDX.log\n    cd ..\n  fi\n\n  sleep 2\n\n  screen -ls\nfi\n" }, { "alpha_fraction": 0.517982006072998, "alphanum_fraction": 0.5244755148887634, "avg_line_length": 43.45454406738281, "blob_id": "88a9c1b99184f819bc51c9881d985890133a2adb", "content_id": "b83b0d65233deb5674ffca650ddebf18a11ac67f", "detected_licenses": [ "LicenseRef-scancode-warranty-disclaimer" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2002, "license_type": "no_license", "max_line_length": 108, "num_lines": 44, "path": "/ServerControl_Template/stracker/http_templates/tmpl_mainpage.py", "repo_name": "RLC-GTL/AC_SRU_ServerManager", "src_encoding": "UTF-8", "text": "# -*- 
coding: utf-8 -*-\r\n\r\nfrom bottle import SimpleTemplate\r\n\r\n# ----------------------------------------------\r\n# main page template\r\n# ----------------------------------------------\r\n\r\nmainPageTemplate = SimpleTemplate(\"\"\"\r\n<div class=\"container\">\r\n    <div class=\"row page-header\">\r\n        <div class=\"col-md-6\"><img src=\"/img/banner.png\" title=\"Logo Track\" class=\"ACimg\">\r\n        </div>\r\n        <div class=\"col-md-6\">\r\n        </div>\r\n    </div>\r\n    <div class=\"row\">\r\n        <div class=\"col-md-12\">\r\n            <h1>Welcome to the stracker statistics query</h1>\r\n            <h2>What is stracker</h2>\r\n            <p><i>stracker</i> is a service supporting Assetto Corsa servers with automatically\r\n            generated statistics.</p>\r\n            <h2>Prerequisites</h2>\r\n            <p>While <i>stracker</i> works as a standalone server-side application,\r\n            Assetto Corsa drivers are encouraged to use the app <i>ptracker</i> which\r\n            offers the following benefits:\r\n            <ul>\r\n                <li>in-game access to the most important statistics pages</li>\r\n                <li>sending setups between drivers</li>\r\n                <li>automatically save personal best setups</li>\r\n                <li>optional leaderboard display with various delta methods</li>\r\n                <li>optional live delta display to your fastest lap or any stored lap in the server</li>\r\n                <li>optional display of <i>stracker</i> messages</li>\r\n                <li>send detailed lap information to <i>stracker</i></li>\r\n            </ul>\r\n            </p>\r\n            <h2>Project homepages</h2>\r\n            <p><a href=\"http://n-e-y-s.de\">ptracker and stracker homepage</a><br>\r\n            <a href=\"http://n-e-y-s.de/ptracker_doc\">ptracker documentation and FAQ</a><br>\r\n            <a href=\"http://n-e-y-s.de/stracker_doc\">stracker documentation and FAQ</a></p>\r\n        </div>\r\n    </div>\r\n</div>\r\n\"\"\")\r\n\r\n" } ]
7
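All of the tmpl_*.py files in the record above build on bottle's SimpleTemplate, in which lines whose first non-whitespace character is % are executed as Python, {{expr}} is an escaped substitution, and {{!expr}} is a raw one. A standalone sketch of that rendering pattern follows; the server names are invented for illustration:

    from bottle import SimpleTemplate

    tmpl = SimpleTemplate("""
    % for s in servers:
    <option {{!"selected" if s == server else ""}} value="{{s}}">{{s}}</option>
    % end
    """)

    # render() takes the template variables as keyword arguments
    print(tmpl.render(servers=['SRU-1', 'SRU-2'], server='SRU-1'))

The MyTemplate subclass in tmpl_helpers.py above adds one twist to this: it stores extra keyword arguments (such as car_info) at construction time and merges them into every render() call.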
hkorzeniewski/Flask-activity-registration
https://github.com/hkorzeniewski/Flask-activity-registration
2a4fe0d6d9591c37c5acfa6ce0a4acc799f5fa9b
bb11edfd5f2fbda38ca71c79fd7c26675b012995
5de91b0a4ea332e7bb636b7f11c163ce21b52272
refs/heads/main
2023-06-02T10:22:41.102443
2021-06-26T00:23:58
2021-06-26T00:23:58
344,303,366
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6563585996627808, "alphanum_fraction": 0.6591382622718811, "avg_line_length": 32.87058639526367, "blob_id": "1c63961a01e0cdd8ada7a6325e3e22a5f041158e", "content_id": "77bd840aaa00aa6e43b9a8a703fb585fce02267a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2878, "license_type": "no_license", "max_line_length": 132, "num_lines": 85, "path": "/website/views.py", "repo_name": "hkorzeniewski/Flask-activity-registration", "src_encoding": "UTF-8", "text": "from flask import Blueprint, render_template, request, flash, jsonify,redirect\nfrom flask_login import login_user, login_required, logout_user, current_user\nfrom .models import Activity, Cardio\nfrom . import db\nimport json\n\nviews = Blueprint('views', __name__)\n\n\[email protected]('/', methods = ['GET'])\n@login_required\ndef home():\n return render_template(\"home.html\", user=current_user)\n\[email protected]('/activities', methods=['GET','POST'])\n@login_required\ndef activities():\n\n return render_template(\"activities.html\", user=current_user)\n\[email protected]('/cardio', methods=['GET','POST'])\n@login_required\ndef cardio():\n \n if request.method =='POST':\n cardio_name = request.form.get('cardio_name')\n place = request.form.get('place')\n distance = request.form.get('distance')\n duration = request.form.get('duration')\n\n if len(cardio_name) < 1:\n flash('Name of activity is too short', category='error')\n else:\n new_cardio = Cardio(cardio_name=cardio_name, place=place, distance=distance, duration=duration, user_id=current_user.id)\n \n db.session.add(new_cardio)\n db.session.commit()\n flash('Cardio added', category='Success')\n\n return render_template(\"cardio.html\", user=current_user)\n\[email protected]('/activity', methods=['GET','POST'])\n@login_required\ndef activity():\n \n if request.method =='POST':\n activity_name = request.form.get('activity_name')\n duration = request.form.get('duration')\n description = request.form.get('description')\n\n if len(activity_name) < 1:\n flash('Name of activity is too short', category='error')\n else:\n new_activity = Activity(activity_name=activity_name,duration=duration, description=description, user_id=current_user.id)\n \n db.session.add(new_activity)\n db.session.commit()\n flash('Activity added', category='Success')\n \n return render_template(\"activity.html\", user=current_user)\n\n\[email protected]('/delete-activity/<int:id>', methods=['POST'])\ndef delete_activity(id):\n activity_delete = Activity.query.get_or_404(id)\n db.session.delete(activity_delete)\n db.session.commit()\n return redirect('/')\n\[email protected]('/delete-cardio/<int:id>', methods=['POST'])\ndef delete_cardio(id):\n cardio_delete = Cardio.query.get_or_404(id)\n db.session.delete(cardio_delete)\n db.session.commit()\n return redirect('/')\n\[email protected]('/activity/<int:id>')\ndef detail_activity(id):\n activity_detail = Activity.query.get(id)\n return render_template(\"activity-detail.html\", user=current_user, activity=activity_detail)\n\[email protected]('/cardio/<int:id>')\ndef detail_cardio(id):\n cardio_detail = Cardio.query.get(id)\n return render_template(\"cardio-detail.html\", user=current_user, cardio=cardio_detail)" }, { "alpha_fraction": 0.6181818246841431, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 22.571428298950195, "blob_id": "9040bd0f6c0544f466105e24c8df5d486fc22193", "content_id": "90d6ed1c2c72dd051a8761866fa8181d464de7e0", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 660, "license_type": "no_license", "max_line_length": 89, "num_lines": 28, "path": "/migrations/versions/a6889e26a94c_.py", "repo_name": "hkorzeniewski/Flask-activity-registration", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: a6889e26a94c\nRevises: 4aad4f7b688b\nCreate Date: 2021-03-05 19:41:24.581276\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a6889e26a94c'\ndown_revision = '4aad4f7b688b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('cardio', sa.Column('date', sa.DateTime(timezone=True), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('cardio', 'date')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.6347032189369202, "alphanum_fraction": 0.6727549433708191, "avg_line_length": 22.464284896850586, "blob_id": "cb65b8461d3cb9da778337cace7a4b3c21acc8d8", "content_id": "d6728d0ff4fc9b8e6e48151b20aa140c3771b948", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 81, "num_lines": 28, "path": "/migrations/versions/4aad4f7b688b_.py", "repo_name": "hkorzeniewski/Flask-activity-registration", "src_encoding": "UTF-8", "text": "\"\"\"empty message\n\nRevision ID: 4aad4f7b688b\nRevises: c8b93fe68601\nCreate Date: 2021-03-05 19:29:37.355979\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4aad4f7b688b'\ndown_revision = 'c8b93fe68601'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('activity', sa.Column('description', sa.Text(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('activity', 'description')\n # ### end Alembic commands ###\n" }, { "alpha_fraction": 0.8363636136054993, "alphanum_fraction": 0.8363636136054993, "avg_line_length": 35.66666793823242, "blob_id": "7a03320697d89e3d73521ae33dcdaae880c3cb09", "content_id": "f051b5a52873e7c5d80e9650c50f84ca6154dbf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 110, "license_type": "no_license", "max_line_length": 78, "num_lines": 3, "path": "/README.md", "repo_name": "hkorzeniewski/Flask-activity-registration", "src_encoding": "UTF-8", "text": "# Flask-activity-registration\n\nApplication written in framework flask in which we can register our activities\n" }, { "alpha_fraction": 0.8064516186714172, "alphanum_fraction": 0.8064516186714172, "avg_line_length": 31, "blob_id": "24fd108590c4fa87d4c03be9ece56c9c6d5b3ffb", "content_id": "67d39c08c600b0e317de515b8df868e1df054dda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 31, "num_lines": 1, "path": "/secrets.py", "repo_name": "hkorzeniewski/Flask-activity-registration", "src_encoding": "UTF-8", "text": "SECRET_KEY = \"SouthernIcyOcean\"" }, { "alpha_fraction": 0.6615824699401855, "alphanum_fraction": 0.6787416338920593, "avg_line_length": 33.900001525878906, "blob_id": "12eeb399d8b76167fc7b297d00666f75b326dac6", "content_id": "86910ad6862ba87a3b2bfbf878dedce36c48d5cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1049, "license_type": "no_license", "max_line_length": 68, "num_lines": 30, "path": "/website/models.py", "repo_name": "hkorzeniewski/Flask-activity-registration", "src_encoding": "UTF-8", "text": "from . import db\nfrom flask_login import UserMixin\nfrom sqlalchemy.sql import func\n\n\nclass User(db.Model, UserMixin):\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(150), unique=True)\n password = db.Column(db.String(150))\n activities = db.relationship('Activity')\n cardios = db.relationship('Cardio')\n \n\nclass Activity(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n activity_name = db.Column(db.String(150))\n duration = db.Column(db.Float)\n date = db.Column(db.DateTime(timezone=True), default=func.now())\n description = db.Column(db.String(200))\n user_id = db.Column(db.Integer, db.ForeignKey('user.id')) \n\n\nclass Cardio(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n cardio_name = db.Column(db.String(150))\n place = db.Column(db.String(100))\n distance = db.Column(db.Integer)\n duration = db.Column(db.Float)\n date = db.Column(db.DateTime(timezone=True), default=func.now())\n user_id = db.Column(db.Integer, db.ForeignKey('user.id')) \n\n" } ]
6
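views.py and models.py above import db from the package, and the views rely on Flask-Login's current_user, but the package's __init__.py is not included in the record. Below is a hypothetical sketch of what that missing file typically looks like with this layout; the config values and the 'auth.login' endpoint are assumptions, not taken from the repo:

    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy
    from flask_login import LoginManager

    db = SQLAlchemy()

    def create_app():
        app = Flask(__name__)
        app.config['SECRET_KEY'] = 'change-me'  # the repo keeps one in secrets.py
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'  # assumed
        db.init_app(app)

        from .views import views
        from .models import User
        app.register_blueprint(views, url_prefix='/')

        login_manager = LoginManager()
        login_manager.login_view = 'auth.login'  # assumed; no auth blueprint appears in the record
        login_manager.init_app(app)

        @login_manager.user_loader
        def load_user(user_id):
            # Flask-Login resolves the session cookie to a User row via this hook
            return User.query.get(int(user_id))

        return app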
ibecse/python
https://github.com/ibecse/python
ded4e8bedc78b4bf59e8aae9a8a34b7d99f2faff
90eb5b1fbb7aebff50ee3caf721110dba6b43ab8
903189b7de5cb10adf872863a9a6d1d43f9d2d8d
refs/heads/master
2021-01-21T14:00:57.703898
2019-08-21T16:29:13
2019-08-21T16:29:13
91,817,734
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.620215892791748, "alphanum_fraction": 0.6290481090545654, "avg_line_length": 30.78125, "blob_id": "f27001f024f4dbb4ff1ece8d44ef95526d7b7b41", "content_id": "103fd68c93113912d4fa405b5f9e66781d778bed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 74, "num_lines": 32, "path": "/wechat_friend.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "from wxpy import *\nimport openpyxl\n\nbot = Bot(cache_path=True)\nmy_friends = bot.friends()\nprint(\"好友数量:{}\".format(len(my_friends)))\nlis = [['NickName', 'RemarkName', 'Sex', 'Province', 'City', 'Signature']]\n\nfor friend in my_friends:\n NickName = friend.raw.get('NickName', None)\n RemarkName = friend.raw.get('RemarkName', None)\n Sex = {1: \"男\", 2: \"女\", 3: \"其他\"}.get(friend.raw.get('Sex', None), None)\n City = friend.raw.get('City', None)\n Province = friend.raw.get('Province', None)\n Signature = friend.raw.get('Signature', None)\n list_0 = [NickName, RemarkName, Sex, Province, City, Signature]\n lis.append(list_0)\n\n\ndef list_excel(filename, lis):\n workbook = openpyxl.Workbook()\n sheet = workbook.active\n sheet.title = 'WechatFriends'\n file_name = filename + '.xlsx'\n for i in range(0, len(lis)):\n for j in range(0, len(lis[i])):\n sheet.cell(row=i+1, column=j+1, value=str(lis[i][j]))\n workbook.save(file_name)\n print(\"写入成功!\")\n\n\nlist_excel('wechat', lis)\n\n\n" }, { "alpha_fraction": 0.6732673048973083, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 10.454545021057129, "blob_id": "1a02a595f30a00360885403de28b2b452fe18ed9", "content_id": "f51ca3d9f150cfd95b02aa1ec5b1c0332ce672bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 29, "num_lines": 44, "path": "/lpthw/ex45_game.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "class Address(object):\n pass\n\nclass Transportation(object):\n pass\n\nclass Time(object):\n pass\n\nclass Fail(object):\n pass\n\nclass Win(object):\n pass\n\nclass Home(Address):\n pass\n\nclass BusStation(Address):\n pass\n\nclass SubwayStation(Address):\n pass\n\nclass CBD(Address):\n pass\n\nclass Bike(Transportation):\n pass\n\nclass Bus(Transportation):\n pass\n\nclass Subway(Transportation):\n pass\n\nclass SevenAm(Time):\n pass\n\nclass NineAm(Time):\n pass\n\nclass SixPm(Time):\n pass\n\n" }, { "alpha_fraction": 0.6605504751205444, "alphanum_fraction": 0.6972476840019226, "avg_line_length": 26.25, "blob_id": "aa4c4d1aafc99cd2df7437be70cd21f107c1eed5", "content_id": "07bb54f42dbb7c6fcae377d08c60488b70588f94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 143, "license_type": "no_license", "max_line_length": 35, "num_lines": 4, "path": "/BIT-268001/WeekNamePrintV2.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "# WeekNamePrintV2.py\nWeekStr = \"一二三四五六日\"\nWeekId = eval(input(\"请输入星期数字1-7:\"))\nprint(\"星期\" + WeekStr[WeekId-1])\n" }, { "alpha_fraction": 0.31578946113586426, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 8.5, "blob_id": "c30efd612e93377605f34d4f88c3178db5582cbe", "content_id": "cb18a0ea17d339a1de3b5dec46652b49bedff947", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 19, "license_type": "no_license", "max_line_length": 9, "num_lines": 2, 
"path": "/README.md", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "# python\n2017/5/26\n" }, { "alpha_fraction": 0.508474588394165, "alphanum_fraction": 0.5847457647323608, "avg_line_length": 18.66666603088379, "blob_id": "a524c42c659c62b7146fbbf2b4d13c02204a85ef", "content_id": "0f0f45391fb3a9c8c5e7953b9bb65164b4f9ae55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 34, "num_lines": 6, "path": "/BIT-268001/CircleArea.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "# CircleArea.py\nr = input(\"请输入圆的半径(米):\")\nr = eval(r)\nPi = 3.1415926\nS = Pi * r * r\nprint(\"圆的面积为:{:.2f}平方米\".format(S))\n" }, { "alpha_fraction": 0.6642857193946838, "alphanum_fraction": 0.7071428298950195, "avg_line_length": 27, "blob_id": "fd8077a2f04340342fffda63336a6493aa169778", "content_id": "60b10741a7396e5a0322ce231322d1e3079e0e03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/BIT-268001/WeekNamePrintV1.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "# WeekNamePrintV1.py\nWeekStr = \"星期一星期二星期三星期四星期五星期六星期日\"\nWeekId = eval(input(\"请输入星期数字1-7:\"))\npos = (WeekId - 1) * 3\nprint(WeekStr[pos:pos+3])\n" }, { "alpha_fraction": 0.40875911712646484, "alphanum_fraction": 0.4678831994533539, "avg_line_length": 18.826086044311523, "blob_id": "d462d26db50bb27c225df19994932acb066acfe4", "content_id": "93d3aac572ba2517466d455461e815ba51bccb2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 44, "num_lines": 69, "path": "/BIT-268001/SevenDigitDraw.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "#\nimport turtle\n\n\ndef draw_line(draw):\n if draw:\n turtle.pendown()\n else:\n turtle.penup()\n turtle.fd(50)\n turtle.right(90)\n turtle.penup()\n\n\ndef draw_digit(digit):\n if digit in [2, 3, 4, 5, 6, 8, 9]:\n draw_line(True)\n else:\n draw_line(False)\n if digit in [0, 1, 3, 4, 5, 6, 7, 8, 9]:\n draw_line(True)\n else:\n draw_line(False)\n if digit in [0, 2, 3, 5, 6, 8, 9]:\n draw_line(True)\n else:\n draw_line(False)\n if digit in [0, 2, 6, 8]:\n draw_line(True)\n else:\n draw_line(False)\n turtle.left(90)\n if digit in [0, 4, 5, 6, 8, 9]:\n draw_line(True)\n else:\n draw_line(False)\n if digit in [0, 2, 3, 5, 6, 7, 8, 9]:\n draw_line(True)\n else:\n draw_line(False)\n if digit in [0, 1, 2, 3, 4, 7, 8, 9]:\n draw_line(True)\n else:\n draw_line(False)\n\n\ndef main():\n s = input(\"请输入日期yyyymmdd:\")\n turtle.screensize(1000, 600)\n turtle.penup()\n turtle.fd(-400)\n n = 0\n for i in s:\n draw_digit(eval(i))\n turtle.right(180)\n turtle.fd(50)\n n += 1\n if n == 4:\n turtle.write(\"年\")\n turtle.fd(50)\n if n == 6:\n turtle.write(\"月\")\n turtle.fd(50)\n if n == 8:\n turtle.write(\"日\")\n turtle.fd(50)\n\n\nmain()\n\n\n" }, { "alpha_fraction": 0.5390625, "alphanum_fraction": 0.6015625, "avg_line_length": 20.33333396911621, "blob_id": "f3975bb361c20d2f22174af176524fbc8d942ca1", "content_id": "3dfcbbb0c5615af95b2c13adc652c10b3ab5d47c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 128, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/BIT-268001/TextProBarV2.py", "repo_name": "ibecse/python", 
"src_encoding": "UTF-8", "text": "# TextProBarV2.py\nimport time\nscale = 100\nfor i in range(scale + 1):\n print(\"\\r{:3}%\".format(i), end=\"\")\n time.sleep(0.1)\n" }, { "alpha_fraction": 0.5448718070983887, "alphanum_fraction": 0.6346153616905212, "avg_line_length": 30.200000762939453, "blob_id": "6001f3b372502ab816b49da1570c696af434b481", "content_id": "e3ff457c109203fb2b785a8e9b9ffd23fdc3d80e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 51, "num_lines": 5, "path": "/BIT-268001/DayDayUpQ2.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "# DayDayUpQ2.py\ndayfactor = 0.01\ndayup = pow((1 + dayfactor), 365)\ndaydown = pow ((1 - dayfactor), 365)\nprint(\"向上:{:.2f},向下:{:.2f}\".format(dayup, daydown))\n" }, { "alpha_fraction": 0.3678756356239319, "alphanum_fraction": 0.3816925585269928, "avg_line_length": 31.22222137451172, "blob_id": "9b7cea83d8ad76ad11f689c44dbcc78b7287f694", "content_id": "9f7551f2ba5a4ade6c5992f052aa702192f8c1d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 41, "num_lines": 18, "path": "/BIT-268001/Untitled-1.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "Str = input()\nfor i in range(len(Str)):\n if i == '+':\n M = eval(Str[:Str.find('+')])\n N = eval(Str[Str.find('+') + 1:])\n print(\"{:.2f}\".format(M + N))\n if i == '-':\n M = eval(Str[:Str.find('-')])\n N = eval(Str[Str.find('-') + 1:])\n print(\"{:.2f}\".format(M - N))\n if i == '*':\n M = eval(Str[:Str.find('*')])\n N = eval(Str[Str.find('*') + 1:])\n print(\"{:.2f}\".format(M * N))\n if i == '/':\n M = eval(Str[:Str.find('/')])\n N = eval(Str[Str.find('/') + 1:])\n print(\"{:.2f}\".format(M / N))" }, { "alpha_fraction": 0.5462686419487, "alphanum_fraction": 0.6029850840568542, "avg_line_length": 24.769229888916016, "blob_id": "a62da15760b061de791e330d31af40f52c1fb20d", "content_id": "43318e033221bc609e11a1dde61541d6bdb3d544", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 55, "num_lines": 13, "path": "/BIT-268001/CalPiV2.py", "repo_name": "ibecse/python", "src_encoding": "UTF-8", "text": "# CalPiV2.py\nfrom random import random\nfrom time import perf_counter\nDARTS = 1000 * 1000\nhits = 0.0\nstart = perf_counter()\nfor i in range(DARTS):\n x, y = random(), random()\n if pow((x**2 + y**2), 0.5) <= 1:\n hits += 1\npi = 4 * (hits/DARTS)\nprint(\"Pi={}\".format(pi))\nprint(\"RunTime={:.5f}s\".format(perf_counter() - start))\n" } ]
11
haw230/dynamic-fibonacci
https://github.com/haw230/dynamic-fibonacci
f7526e65ac19a5b94ff58fd44e505e03e3601f5c
268a8cc427eac90b01e9368bd79468bb5f34d9e3
13e0396f7c42d5e207476bc580b69109a245f3a9
refs/heads/master
2021-01-02T09:09:56.443891
2017-08-03T20:38:54
2017-08-03T20:38:54
99,148,560
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5341399908065796, "alphanum_fraction": 0.5678479075431824, "avg_line_length": 36.95082092285156, "blob_id": "e8d6e91afa9e95b4c3adb276bfd7999963ebe37c", "content_id": "8f086ae898a4a07d3f9a9b91a3d80fe808570486", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2440, "license_type": "no_license", "max_line_length": 135, "num_lines": 61, "path": "/tests/test_main.py", "repo_name": "haw230/dynamic-fibonacci", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\n\nsys.path.insert(0, '/home/ubuntu/workspace/dynamic_fibonacci') #change this to correct folder name\nsys.path.insert(0, '/home/ubuntu/workspace/solution')\n\nfrom main import fib #change to proper function name\nfrom solution import solved_fib #change to proper function name\nfrom time import sleep\nfrom random import sample\n\nclass aethetics(object):\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n END = '\\033[0m'\n BOLD = '\\033[1m'\n LINE = '\\033[95m———————————————————————————————————————————————————————————————\\033[0m'\n\nclass TestCases(object):\n def __init__(self):\n self.passed_tests = 0\n self.total_tests = 0\n\n def tests(self): #add tests here\n f1,f2 = fib, solved_fib #change to proper function names\n self.test(f1, f2, 4)\n self.test(f1, f2, 5)\n self.test(f1, f2, 6)\n self.test(f1, f2, 7)\n self.test(f1, f2, 8)\n self.test(f1, f2, 9)\n self.test(f1, f2, 30)\n self.test(f1, f2, 60)\n \n def test(self, func1, func2, n):\n x, y = func1(n), func2(n)\n if(x == y):\n print(aethetics.GREEN + 'Test passed with param of ' + aethetics.BLUE + str(n) + aethetics.END + '\\n' + aethetics.END)\n print(aethetics.BOLD + str(x) + aethetics.END + ' matches the answer ' + aethetics.BOLD + str(y) + aethetics.END)\n self.passed_tests += 1\n else:\n print(aethetics.FAIL + 'Test failed with param of ' + aethetics.BLUE + str(n) + aethetics.END + '\\n' + aethetics.END)\n print(aethetics.BOLD + str(x) + aethetics.END + ' does not match the answer ' + aethetics.BOLD + str(y) + aethetics.END)\n self.total_tests += 1\n print(aethetics.LINE + '\\n')\n sleep(0.7)\n \n def end(self):\n if(self.passed_tests == self.total_tests):\n print(aethetics.GREEN + 'All ' + str(self.total_tests) + ' tests passed.' + aethetics.END)\n else:\n print(aethetics.WARNING + 'Passed ' + str(self.passed_tests) + ' of ' + str(self.total_tests) + ' tests.' 
+ aethetics.END)\n\nif(__name__ == '__main__'):\n print('\\n' + aethetics.GREEN + \"Running Tests...\" + aethetics.END)\n print(aethetics.LINE + '\\n')\n t = TestCases()\n t.tests()\n t.end()" }, { "alpha_fraction": 0.4712643623352051, "alphanum_fraction": 0.49425286054611206, "avg_line_length": 18.44444465637207, "blob_id": "46cb6b04b190c12e6944a8ebeb8e9aebd5559a45", "content_id": "2c552cbb6f1bc8ccc0a86eec5b40ee5f2b9ea99d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 51, "num_lines": 9, "path": "/solution/solution.py", "repo_name": "haw230/dynamic-fibonacci", "src_encoding": "UTF-8", "text": "memo = {}\n\ndef solved_fib(n):\n if(n < 2):\n return 1\n if(n in memo):\n return memo[n]\n memo[n] = solved_fib(n - 1) + solved_fib(n - 2)\n return memo[n]" }, { "alpha_fraction": 0.7719298005104065, "alphanum_fraction": 0.7982456088066101, "avg_line_length": 56, "blob_id": "3d9b8fe65d510f71267bb0e4ea5c77e23363a48f", "content_id": "54f5ea2695d93ad823db9d1482bff811c584e946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 114, "license_type": "no_license", "max_line_length": 93, "num_lines": 2, "path": "/README.md", "repo_name": "haw230/dynamic-fibonacci", "src_encoding": "UTF-8", "text": "# Dynamic Fibonacci\nDynamic Fibonacci for [The Anadromi Project](https://github.com/haw230/the-anadromi-project).\n" }, { "alpha_fraction": 0.7358490824699402, "alphanum_fraction": 0.7358490824699402, "avg_line_length": 52, "blob_id": "70962709325ab2ddc87df07aad8a7e89891825de", "content_id": "33e0cce953c1caed0df6fc1c5d7d7cf95096967a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 52, "num_lines": 1, "path": "/solution/__init__.py", "repo_name": "haw230/dynamic-fibonacci", "src_encoding": "UTF-8", "text": "#obligatory __init__.py so Python knows to look here\n" } ]
4
veit-schiele-communications/datenanalyse-in-python
https://github.com/veit-schiele-communications/datenanalyse-in-python
7d20a119a18a7ec1626639e803a925350f0ec7b6
0d316b635c6538fa8e66111d20044fa381276150
6af129c187853d611195242bbfb11de74ab59ba7
refs/heads/master
2021-01-18T13:07:49.588437
2020-03-03T21:27:24
2020-03-03T21:27:24
80,728,207
3
2
null
null
null
null
null
[ { "alpha_fraction": 0.6172839403152466, "alphanum_fraction": 0.6419752836227417, "avg_line_length": 19.1875, "blob_id": "18c566235c1b7c79e1f0ad37ddcc39b19d356c06", "content_id": "16e30ebbd9d42fca4e0741667d1ff986265335d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 62, "num_lines": 16, "path": "/datenvisualisierung/heatmap.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pylab as plt\nimport numpy as np\n\na = np.arange(-4, 4, 0.1)\nx, y = np.meshgrid(a, a)\nmatrix = np.sin(x**2 + y**2)\n\nplt.figure()\n\nplt.imshow(matrix, cmap=plt.cm.plasma) # gray, hot, magma, ..\nplt.colorbar()\n\nplt.title(\"Heatmap der Funktion $\\sin{(x^2 + y^2)}$\")\n\nplt.savefig(\"heatmap.png\")\nplt.savefig(\"heatmap.svg\")\n" }, { "alpha_fraction": 0.7061293125152588, "alphanum_fraction": 0.719563364982605, "avg_line_length": 22.780000686645508, "blob_id": "b9a5417607446a61d39bca17225570b3a8f105cd", "content_id": "ffe42dec8f8ff9652c759a1177c446915b66d739", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 169, "num_lines": 50, "path": "/projekt_songtexte/teil2_songs_extrahieren.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Teil 2: Songs auslesen\n\n## Aufgabe 2.1\n\nErstelle eine neue Python-Datei `songs_auslesen.py`.\n\n## Aufgabe 2.2\n\nLies die in Aufgabe 1.4 gespeicherte HTML-Datei ein.\n\n## Aufgabe 2.3\n\nBetrachte die HTML-Datei in einem Texteditor. Finde heraus, wo in der Datei die Songtitel und Links in der Seite sind und woran das Programm diese Stellen erkennen kann.\n\n## Aufgabe 2.4\n\nSchreibe ein Programm, das alle Links zu songs aus der Seite ausliest und in einer Liste sammelt, z.B.:\n\n [\n 'madonna/frozen.html',\n 'madonna/burningup.html',\n ..\n ]\n\nMögliche Ansätze:\n\n* `string.find`\n* `string.split`\n* Position im String (falls sie immer die gleiche ist)\n* Reguläre Ausdrücke\n\n#### Hinweis:\n\nWenn in der Seite bestimmte Sonderzeichen vorkommen, mußt Du die Datei öffnen mit:\n\n f = open(dateiname, 'r', encoding='utf-8')\n\n## Aufgabe 2.5\n\nGib alle Links auf dem Bildschirm aus und prüfe nach, ob das Programm korrekt arbeitet.\n\n## Aufgabe 2.6\n\nVerpacke den bisher geschriebenen Code in eine Funktion:\n\n def titel_auslesen(dateiname):\n # Dein Code kommt hierhin\n return songtitel\n\nRufe die Funktion im Programm aus und stelle sicher, daß es immer noch funktioniert.\n\n" }, { "alpha_fraction": 0.6518987417221069, "alphanum_fraction": 0.6782700419425964, "avg_line_length": 34.11111068725586, "blob_id": "c834efe6b1d1b9a900e2e30302d1145b62b7e8df", "content_id": "06bfbce6007800df9b04590027b4be1b603d26cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 950, "license_type": "no_license", "max_line_length": 210, "num_lines": 27, "path": "/datenaggregation/beispiele_gruppen.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\ndf = pd.read_csv('grosse_laender_2015.csv', index_col=0)\n\ndf['population'] = df['population'] / 1000000)\ndf['population'] = df['population'].apply(round)\n\n# 1. nach einer Spalte\ng1 = df.groupby('continent')\n\n# 2. 
nach einem Array gleicher Länge\nindustrialized = np.array([False, True, True, True, False, True, True, False, False, False, True, True])\ng2 = df.groupby(industrialized)\n\n# 3. nach einem Dictionary mit Schlüsseln auf den Index\nlanguage = {'Bangladesh':'HD', 'Brazil':'PT', 'China':'CN', 'India':'HD', 'Indonesia':'ID', 'Japan':'JP', 'Mexico':'ES', 'Nigeria':'NG', 'Pakistan':'AR', 'Philippines':'PP', 'Russia':'RU', 'United States':'EN'}\ng3 = df.groupby(language)\n\n# 4. mit einer Funktion\ng4 = df.groupby(len)\n\n# 5. mit einer Liste der obigen\ng5 = df.groupby(['continent', language, len])\n\n# 6. entlang der X-Achse gruppieren\ng6 = df[['population', 'fertility']].transpose().groupby(len, axis=1)\n" }, { "alpha_fraction": 0.7262969613075256, "alphanum_fraction": 0.742397129535675, "avg_line_length": 27.615385055541992, "blob_id": "ff83ca844662d90e7ac76ea31e35fb43355c628c", "content_id": "c213da4fb730c5a953668c1c7a882f193cb40967", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1126, "license_type": "no_license", "max_line_length": 185, "num_lines": 39, "path": "/datenvisualisierung/animation.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Animierter Scatterplot\n\nIn dieser Übung werden wir versuchen, die berühmte Animation von Hans Rosling nachzustellen (siehe [https://www.youtube.com/watch?v=jbkSRLYSojo](https://www.youtube.com/watch?v=jbkSRLYSojo)).\n\nDazu erstellen wir eine Serie von Bildern, die wir zu einem animierten GIF verbinden.\n\n### Schritt 1\n\nErstelle einen Scatterplot für die Korrelation zwischen Lebenserwartung und Fruchtbarkeit wie in der ersten Übung für *jedes* Jahr zwischen 1960 und 2015\n(davor sind die Daten sehr lückenhaft).\n\n\n### Schritt 2\n\nSpeichere jeden Scatterplot als eigene Datei mit der Jahreszahl im Dateinamen ab, z.B. `lifeexp_.\n\n\n### Schritt 3\n\nInstalliere das Python-Modul `imageio`, indem Du auf der Kommandozeile eingibst:\n\n pip install imageio\n\n(kein Problem unter Linux/Mac, unter Windows mußt Du eventuell vorher Anaconda installieren.)\n\n\n### Schritt 4\n\nPasse das folgende Skript an und führe es aus:\n\n import imageio\n\n images = []\n\n for i in range(0, 100):\n filename = 'vortex_{}.png'.format(i)\n images.append(imageio.imread(filename))\n\n imageio.mimsave('output.gif', images, fps=20)\n\n" }, { "alpha_fraction": 0.7266099452972412, "alphanum_fraction": 0.7339003682136536, "avg_line_length": 24.71875, "blob_id": "65d163e62782ca6f66bcda232abbbde68815a4fe", "content_id": "5328dee52d2beb82775f283bc32dde47068dee0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 827, "license_type": "no_license", "max_line_length": 152, "num_lines": 32, "path": "/datenaufbereitung/daten_import_export.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "# Import / Export von Daten\n\n### Aufgabe 1\n\nLies die Datei `gapminder_total_fertility.csv` mit der Funktion `pd.read_csv` in ein DataFrame ein.\n\n### Aufgabe 2\n\nLies die Datei `gapminder_population.xlsx` mit der Funktion `pd.read_excel` in ein DataFrame ein.\n\n### Aufgabe 3\n\nLies die Datei `gapminder_gdp_per_capita.json` mit der Funktion `pd.read_json` in ein DataFrame ein.\n\n### Aufgabe 4\n\nBetrachte die Optionen der Funktion `pd.read_csv`. 
In welchen Situationen sind folgende Optionen nützlich?\n\n* `sep`\n* `usecols`\n* `index_col`\n* `na_values`\n* `nrows`\n* `chunksize`\n\n### Aufgabe 5\n\nVerwende die Methode `to_csv`, um die Daten in einem einheitlichen Format zu speichern.\n\n### Aufgabe 6\n\nVerwende die Methode `to_dict`, um die Daten in ein Dictionary zu überführen. Welche Vor- und Nachteile bietet ein Dictionary gegenüber einem DataFrame?\n" }, { "alpha_fraction": 0.7732864618301392, "alphanum_fraction": 0.7838312983512878, "avg_line_length": 36.79999923706055, "blob_id": "ba96266daf9b116c204d088e6cee2ca75e416692", "content_id": "f914518df68e29efc3690d824430b1e841404833", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 579, "license_type": "no_license", "max_line_length": 181, "num_lines": 15, "path": "/datenvisualisierung/quereinsteiger.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Übungen für Quereinsteiger\n\nDiese Übungen sind für Leute, die die Einführung zu `pandas` verpaßt haben und den Anschluß trotzdem nicht verlieren möchten.\n\n### Aufgabe 1\n\nFühre die Programme zum Erzeugen von Diagrammen aus den Kursmaterialien aus.\n\n### Aufgabe 2\n\nBetrachte die Funktionen aus dem Modul `matplotlib` (`pylab`) auf dem Referenzblatt. Probiere 2-3 der Funktionen selbst aus, indem Du eines der Programme aus Aufgabe 1 modifizierst.\n\n### Aufgabe 3\n\nFühre eines der Beispielprogramme aus der [Matplotlib Gallery](http://matplotlib.org/gallery.html) aus.\n\n" }, { "alpha_fraction": 0.6873614192008972, "alphanum_fraction": 0.7084257006645203, "avg_line_length": 27.15625, "blob_id": "89193e5ef1028d58e5633c837848269c6733fadb", "content_id": "45213505c7729cd2a9649cbc04ba0d4599bd553d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 908, "license_type": "no_license", "max_line_length": 114, "num_lines": 32, "path": "/projekt_songtexte/aufgaben_songtexte.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Songtexte\n\n## Ziel\n\nIn diesem Projekt werden wir **aus Songtexten die Interpreten vorhersagen**. 
\n\nUnser Programm soll lernen, aus Sätzen wie \n\n* *\"we will dance and have a good time\"*\n* *\"I want you to know, yeah, that I still love you so\"*\n* *\"the little bags of dope, there was a pile of coke\"*\n\neinen möglichen Interpreten zu nennen.\n\nDas Projekt besteht aus fünf Teilen\n\n| # | Aufgabe | Thema |\n|---|---------|-------|\n| 1 | Daten aus dem Internet herunterladen | requests |\n| 2 | Songtitel auslesen | Reguläre Ausdrücke |\n| 3 | Songs herunterladen | Funktionen |\n| 4 | Songs einlesen | Wiederholung |\n| 5 | Klassifikation nach dem *Naive Bayes*-Verfahren | scikit-learn |\n\n## Materialien\n\n* Dokumentation zu 25 Python-Modulen zum Nachschlagen.\n* Einführung in pandas\n\n## Links\n\n[https://pudding.cool/2017/05/song-repetition/index.html](https://pudding.cool/2017/05/song-repetition/index.html)\n" }, { "alpha_fraction": 0.6325394511222839, "alphanum_fraction": 0.63470458984375, "avg_line_length": 23.484848022460938, "blob_id": "0e53ac42398b8e9e01c55aa99b58b0fe72aeed30", "content_id": "db3e50bdfc796c8538ddab08c9ab8a4e17a1c302", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3249, "license_type": "no_license", "max_line_length": 72, "num_lines": 132, "path": "/README.rst", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "Datenanalyse in Python\n======================\n\nErgebnis\n--------\n\nNach diesem Kurs können Sie tabellarische Daten mit Python automatisiert\naufbereiten, zusammenfassen und Diagramme erstellen.\n\nZielgruppe\n----------\n\nAnalysten, Wissenschaftler und Ingenieure, die größere Datenmengen\neffizienter handhaben möchten.\n\nVoraussetzungen\n---------------\n\nGrundkenntnisse in Python\n\nKursbeschreibung\n----------------\n\nDie Python-Bibliothek pandas bietet ein praktisches Alltagswerkzeug zur\nAnalyse tabellarischer Daten. Dieser Kurs verbessert Ihren Werkzeugsatz\nfür die Arbeit mit Datensätzen von wenigen hundert bis einigen Millionen\nEinträgen in Python. Der Kurs behandelt an praktischen Beispielen sowohl\ndie erkundende Datenanalyse, das Ermitteln von Kennzahlen und das\nErstellen anschaulicher Grafiken. 
Durch die Integration mit interaktiven\nAnalyseumgebungen wie IPython und Jupyter lassen sich viele\nFragestellungen schnell umsetzen.\n\nKursdauer\n---------\n\n14 Stunden\n\nAgenda\n------\n\n+------------------------+--------------------------+\n| Tag 1 | Tag 2 |\n+========================+==========================+\n| Einführung in pandas | Aggregatfunktionen |\n+------------------------+--------------------------+\n| Datenaufbereitung | Analyse von Zeitreihen |\n+------------------------+--------------------------+\n| Daten zusammenfassen | geographische Daten |\n+------------------------+--------------------------+\n| Datenvisualisierung | pandas in der Praxis |\n+------------------------+--------------------------+\n\nTag 1\n-----\n\nEinführung in pandas\n~~~~~~~~~~~~~~~~~~~~\n\n- Die Arbeitsumgebung zur interaktiven Datenanalyse\n- Kurzübersicht zu ``pandas``\n- Series\n- DataFrame\n- Neuerungen in Python 3\n- Jupyter Notebooks\n\nDatenaufbereitung\n~~~~~~~~~~~~~~~~~\n\n- CSV- und Excel-Dateien in ``pandas`` einlesen\n- Daten sortieren\n- Daten filtern\n- Tabellen transponieren\n- Auswahl von Zeilen und Spalten\n- ``pandas``-Tabellen speichern\n\nDaten zusammenfassen\n~~~~~~~~~~~~~~~~~~~~\n\n- statistische Kenngrößen ermitteln\n- Tabellen zusammenführen\n- hierarchische Indizierung\n- Kreuztabellen\n- Pivot-Tabellen\n\nDatenvisualisierung\n~~~~~~~~~~~~~~~~~~~\n\n- Diagramme mit ``matplotlib`` erstellen\n- ``matplotlib`` aus ``pandas`` verwenden\n- Daten in Jupyter notebooks visualisieren\n- Heatmaps\n- Multi-Panel-Diagramme\n- qualitativ hochwertige Diagramme generieren\n- andere Bibliotheken zur Datenvisualisierung\n\nTag 2\n-----\n\nAggregatfunktionen\n~~~~~~~~~~~~~~~~~~\n\n- Iteration über Zeilen und Spalten\n- Gruppieren\n- Aggregieren\n- Transformieren\n- Anwenden eigener Funktionen\n\nAnalyse von Zeitreihen\n~~~~~~~~~~~~~~~~~~~~~~\n\n- Serien von Datumsstempeln\n- Umskalieren von Zeitreihen\n- Anpassen von Zeitzonen\n- Umgang mit lückenhaften Daten\n- rollender Durchschnitt\n- einfache Prognosen\n\nUmgang mit geographischen Daten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Speichern von Koordinaten in ``pandas``\n- Zeichnen von Karten mit ``Basemap``\n\nPandas in der Praxis\n~~~~~~~~~~~~~~~~~~~~\n\n- Mythen und Fakten\n- Numpy\n- Modellbildung in scikit-learn\n- alternative Programmpakete und Strategien zur Datenmodellierung\n- Umgang mit großen Datenmengen\n- Best Practices\n\n" }, { "alpha_fraction": 0.5754132270812988, "alphanum_fraction": 0.5940082669258118, "avg_line_length": 11.158291816711426, "blob_id": "6127f39e0e4493361112d866bfb37266b885dc49", "content_id": "eacd2f91b4e6aa592c941aa01416775d7c8bcbcc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4841, "license_type": "no_license", "max_line_length": 62, "num_lines": 398, "path": "/cheatsheet/cheatsheet_EN.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Pandas cheat sheet\n\n## Getting started\n\n### Import pandas:\n\n```python\nimport pandas as pd\n```\n\n### Create a series:\n\n```python\ns = pd.Series([1, 2, 3], index=['A', 'B', 'C'], name='col1')\n```\n\n### Create a data frame:\n\n```python\ndata = [[1, 4], [2, 5], [3, 6]]\nindex = ['A', 'B', 'C']\ndf = pd.DataFrame(data, index=index, columns=['col1', 'col2'])\n```\n\n### Load a data frame:\n\n```python\ndf = pd.read_csv('filename.csv', \n sep=',', \n names=['col1', 'col2'], \n index_col=0, \n encoding='utf-8',\n nrows=3)\n```\n\n## Selecting 
rows and columns\n\n### Select single column:\n\n```python\ndf['col1']\n```\n\n### Select multiple columns:\n\n```python\ndf[['col1', 'col2']]\n```\n\n### Show first n rows:\n\n```python\ndf.head(2)\n```\n\n### Show last n rows:\n\n```python\ndf.tail(2)\n```\n\n### Select rows by index values:\n\n```python\ndf.loc['A']\ndf.loc[['A', 'B']]\n```\n\n### Select rows by position:\n\n```python\ndf.iloc[1]\ndf.iloc[1:]\n```\n\n## Data wrangling\n\n### Filter by value:\n\n```python\ndf[df['col1'] > 1]\n```\n\n### Sort by columns:\n\n```python\ndf.sort_values(['col1', 'col2'], ascending=[False, True])\n```\n\n### Identify duplicate rows:\n\n```python\ndf.duplicated()\n```\n\n### Identify unique values:\n\n```python\ndf['col1'].unique()\n```\n\n### Swap rows and columns:\n\n```python\ndf = df.transpose()\n```\n\n### Remove a column:\n\n```python\ndel df['col2']\n```\n\n### Clone a data frame:\n\n```python\nclone = df.copy()\n```\n\n### Connect multiple data frames vertically:\n\n```python\ndf2 = df + 10\npd.concat([df, df2])\n```\n\n## Merge multiple data frames horizontally:\n\n```python\ndf3 = pd.DataFrame([[1, 7], [8, 9]], \n\t index=['B', 'D'], \n\t columns=['col1', 'col3'])\n```\n\n### Only merge complete rows (INNER JOIN):\n\n```python\ndf.merge(df3)\n```\n\n### Left column stays complete (LEFT OUTER JOIN):\n\n```python\ndf.merge(df3, how='left')\n```\n\n### Right column stays complete (RIGHT OUTER JOIN):\n\n```python\ndf.merge(df3, how='right')\n```\n \n### Preserve all values (OUTER JOIN):\n\n```python\ndf.merge(df3, how='outer')\n```\n\n### Merge rows by index:\n\n```python\ndf.merge(df3, left_index=True, right_index=True)\n```\n\n### Fill or remove NaN values:\n\n```python\ndf.fillna(0.0)\ndf.dropna()\n```\n\n### Apply your own function:\n\n```python\ndef func(x): return 2**x\ndf.apply(func)\n```\n\n## Arithmetics and statistics\n\n### Add to all values:\n\n```python\ndf + 10\n```\n\n### Sum over columns:\n\n```python\ndf.sum()\n```\n\n### Cumulative sum over columns:\n\n```python\ndf.cumsum()\n```\n\n### Mean over columns:\n\n```python\ndf.mean()\n```\n\n### Standard deviation over columns:\n\n```python\ndf.std()\n```\n\n### Count unique values:\n\n```python\ndf['col1'].value_counts()\n```\n\n### Summarize descriptive statistics:\n\n```python\ndf.describe()\n```\n\n## Hierarchical indexing\n\n### Create hierarchical index:\n\n```python\ndf.stack()\n```\n\n### Dissolve hierarchical index:\n\n```python\ndf.unstack()\n```\n\n## Aggregation\n\n### Create group object:\n\n```python\ng = df.groupby('col1')\n```\n\n### Iterate over groups:\n\n```python\nfor i, group in g:\n print(i, group)\n```\n\n### Aggregate groups:\n\n```python\ng.sum()\ng.prod()\ng.mean()\ng.std()\ng.describe()\n```\n\n### Select columns from groups:\n\n```python\ng['col2'].sum()\ng[['col2', 'col3']].sum()\n```\n\n### Transform values:\n\n```python\nimport math\ng.transform(math.log)\n```\n\n### Apply a custom function on each group:\n\n```python\ndef strsum(group):\n return ''.join([str(x) for x in group.values])\ng['col2'].apply(strsum)\n```\n\n## Data export\n\n### Data as NumPy array:\n\n```python\ndf.values\n```\n\n### Save data as CSV file:\n\n```python\ndf.to_csv('output.csv', sep=\",\")\n```\n\n### Format a data frame as tabular string:\n\n```python\ndf.to_string()\n```\n\n### Convert a data frame to a dictionary:\n\n```python\ndf.to_dict()\n```\n\n### Save a data frame as Excel table:\n\n```python\ndf.to_excel('output.xlsx')\n```\n\n(requires package `xlwt`)\n\n## Visualization\n\n### Import 
matplotlib:\n\n```python\nimport pylab as plt\n```\n\n### Start a new diagram:\n\n```python\nplt.figure()\n```\n\n### Scatter plot:\n\n```python\ndf.plot.scatter('col1', 'col2', style='ro')\n```\n\n### Bar plot:\n\n```python\ndf.plot.bar(x='col1', y='col2', width=0.7)\n```\n\n### Area plot:\n\n```python\ndf.plot.area(stacked=True, alpha=1.0)\n```\n\n### Box-and-whisker plot:\n\n```python\ndf.plot.box()\n```\n\n### Histogram over one column:\n\n```python\ndf['col1'].plot.hist(bins=3)\n```\n\n### Histogram over all columns:\n\n```python\ndf.plot.hist(bins=3, alpha=0.5)\n```\n\n### Set tick marks:\n\n```python\nlabels = ['A', 'B', 'C', 'D']\npositions = [1.0, 2.0, 3.0, 4.0]\nplt.xticks(positions, labels)\nplt.yticks(positions, labels)\n```\n\n### Select area to plot:\n\n```python\nplt.axis([0.0, 2.5, 0.0, 10.0])\n# [from x, to x, from y, to y]\n```\n\n### Label diagram and axes:\n\n```python\nplt.title('Correlation')\nplt.xlabel('Nunstück')\nplt.ylabel('Slotermeyer')\n```\n\n### Save most recent diagram:\n\n```python\nplt.savefig('plot.png')\nplt.savefig('plot.png', dpi=300)\nplt.savefig('plot.svg')\n```\n" }, { "alpha_fraction": 0.534426212310791, "alphanum_fraction": 0.6426229476928711, "avg_line_length": 21.55555534362793, "blob_id": "9985bd0bb21dc906361ef416b5adb8b4b6c97d62", "content_id": "de9076f0a1920a2da154d56066843a3c46462ca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 74, "num_lines": 27, "path": "/datenvisualisierung/balkendiagramm.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nfrom pylab import figure, title, xlabel, ylabel, xticks, bar\nfrom pylab import legend, axis, savefig\n\nnucleotides = [\"A\", \"G\", \"C\", \"U\"]\n\ncounts = [\n [606, 1024, 759, 398],\n [762, 912, 639, 591],\n]\n\nfigure()\ntitle('RNA-Nukleotide im Ribosom')\nxlabel('RNA')\nylabel('Anzahl Nukleotide')\n\nx1 = [2.0, 4.0, 6.0, 8.0]\nx2 = [x - 0.5 for x in x1]\n\nxticks(x1, nucleotides)\n\nbar(x2, counts[1], width=0.5, color=\"#87CEEB\", label=\"E.coli 23S\")\nbar(x1, counts[0], width=0.5, color=\"#F4A460\", label=\"T.thermophilus 23S\")\n\nlegend()\naxis([0.5, 9.5, 0, 1200])\nsavefig('balkendiagramm.png')\nsavefig('balkendiagramm.svg')\n" }, { "alpha_fraction": 0.6167800426483154, "alphanum_fraction": 0.6712018251419067, "avg_line_length": 18.130434036254883, "blob_id": "43cd557b848d34ac6906362be804577e1c284322", "content_id": "1ef57da423c04a4cfdb3e362f2520448577508e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 56, "num_lines": 23, "path": "/datenvisualisierung/histogramm.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pylab as plt\nimport pandas as pd\nimport random\n\nN_POINTS = 10000\nN_BINS = 50\n\ndata = [random.gauss(0.0, 1.0) for i in range(N_POINTS)]\ns = pd.Series(data)\n\n\nplt.figure()\nplt.hist(s, N_BINS, normed=1.0, histtype='bar',\n facecolor='green', alpha=0.75)\n\nplt.title('Histogramm')\nplt.xlabel('Wert')\nplt.ylabel('Frequenz')\nplt.axis([-5.0, 5.0, 0.0, 0.5])\nplt.grid(True)\n\nplt.savefig('histogramm.png')\nplt.savefig('histogramm.svg')\n" }, { "alpha_fraction": 0.6568889021873474, "alphanum_fraction": 0.6613333225250244, "avg_line_length": 23.434782028198242, "blob_id": "e0d5c542837a9751082fdba23721f1370a894b77", "content_id": 
"87bd1187a16f93c003f10c23d5aa9a017d505a5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1125, "license_type": "no_license", "max_line_length": 74, "num_lines": 46, "path": "/einstieg/firstletter_statistics.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pandas as pd\nimport os\n\nnames = []\nPATH = 'names/'\n\nfor fn in sorted(os.listdir(PATH)):\n if not fn.endswith('.txt'): continue\n df = pd.read_csv(PATH + fn, names=['name', 'gender', 'count'])\n df['year'] = int(fn[-8:-4])\n names.append(df)\n\n# make a single DataFrame from a list of DataFrames\nnames = pd.concat(names)\n\ndef first_letter(x): return x[0]\nnames['first'] = names['name'].apply(first_letter)\n\n# separate boys and girls lists\nboys = names[names.gender=='M']\ngirls = names[names.gender=='F']\n\n# statistics on single names\ndef findname(df, name): \n return df[df['name']==name].sort_values('year')\nprint(findname(boys, \"Tyrion\"))\n\n\ntyrion[['year', 'count']].transpose()\nty = tyrion.set_index('year')\nty['count']\n\nmax(boys[boys['name']==\"Donald\"]['count'])\n\n# total population\ng = names.groupby('year')\nprint(g['count'].apply(sum))\n\n# first letter statistics\nmrc = boys.groupby('first')['count'].apply(sum)\nfrc = girls.groupby('first')['count'].apply(sum)\nprint(mrc / frc)\n\nnamesum = names[names['first'] == 'Q'].groupby('name')['count'].apply(sum)\nnamesum.sort(ascending=False)\nprint(namesum[:10])\n" }, { "alpha_fraction": 0.6854928135871887, "alphanum_fraction": 0.7087486386299133, "avg_line_length": 22.128204345703125, "blob_id": "a72641b518318b4e89eea6c5a1670b6da230bdbc", "content_id": "182337e2caa1354fbbec7d3472598d03fbb45f75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 905, "license_type": "no_license", "max_line_length": 82, "num_lines": 39, "path": "/datenaufbereitung/demo_demographie.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pandas as pd\nimport pylab as plt\n\npop = pd.read_excel('gapminder_population.xlsx', index_col=0)\n\ncc = pd.read_csv('country_codes.csv', index_col=0)\ncc = cc[['region', 'sub-region']]\n\npop = pd.merge(pop, cc, left_index=True, right_index=True, how='left')\n\n\ncontinent = pop.groupby('region').sum()\n\nctrans = continent.transpose()\nctrans = ctrans / 1000000\n\nctrans.plot.area()\nplt.xlabel('Jahr')\nplt.ylabel('Bevölkerung [Mio.]')\nplt.savefig('hist.png')\nplt.savefig('hist.svg')\n\ncdiff = ctrans.diff()\n# cdiff.ix[cdiff.index[:16]] = cdiff.ix[cdiff.index[:16]] / 10\n# cdiff.plot.area(stacked=False)\n\ncdiff.ix[cdiff.index[16:]].plot.area(stacked=False)\nplt.ylabel('Bevölkerungswachstum [Mio/Jahr]')\nplt.xlabel('Jahr')\nplt.savefig('diff.png')\nplt.savefig('diff.svg')\n\n'''\nSources:\n\nData from www.gapminder.org/data\n\nCountry codes from https://github.com/lukes/ISO-3166-Countries-with-Regional-Codes\n'''\n" }, { "alpha_fraction": 0.6696329116821289, "alphanum_fraction": 0.6763070225715637, "avg_line_length": 43.79999923706055, "blob_id": "0fcd07e2f3a380d64be0d81a9653295324f3de48", "content_id": "a63e324630ad3dd728e32e76994516b82044c234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 903, "license_type": "no_license", "max_line_length": 161, "num_lines": 20, "path": "/datenvisualisierung/README.md", "repo_name": 
"veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Datenvisualisierung\n\nDie Übungen zur Datenvisualisierung bestehen aus sechs Teilen:\n\n| Übung | Beschreibung | Schwierigkeitsgrad |\n|-------|--------------|--------------------|\n| 1 | Balkendiagramm plotten | LEICHT |\n| 2 | Korrelation plotten | LEICHT |\n| 3 | Histogramm plotten | MITTEL |\n| 4 | mehrere Korrelationen plotten | MITTEL |\n| 5 | animierter Scatterplot | SCHWER |\n| 6 | Übungen für Quereinsteiger | SEHR LEICHT |\n\n\n### Empfohlene Materialien\n\n* Beispielskripte zum plotten mit `pandas` und `matplotlib`\n* Tabellen zur Demographie von [www.gapminder.org](http://www.gapminder.org)\n* Tutorial zum plotten mit `pandas` auf [pandas.pydata.org/pandas-docs/stable/visualization.html](http://pandas.pydata.org/pandas-docs/stable/visualization.html)\n* Matplotlab Gallery [matplotlib.org/gallery.html](http://matplotlib.org/gallery.html)\n\n\n" }, { "alpha_fraction": 0.7083708643913269, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 18.473684310913086, "blob_id": "f980adcd360513d235828324100297b6a66ed713", "content_id": "e52c5cc67b1ace8b3ae52d15983f2eedee4b15a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "no_license", "max_line_length": 52, "num_lines": 57, "path": "/datenaggregation/beispiele_aggregation.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nfrom beispiele_gruppen import g1, g2, g3, g4, g5, g6\n\n# Aggregation mit Standardfunktionen\ngx.mean()\ngx.max()\ngx.min()\ngx.sum()\ngx.count()\ngx.std()\ngx.median()\ngx.quantile(0.9)\ngx.describe()\n\n# Aggregation mit Spaltenauswahl\ngx['population'].describe()\n\n# Aggregation mit Liste von Funktionen\ngx.agg(['count', 'mean', 'std'])\ngx.agg([('Summe', 'sum')])\n\n# Eigene Aggregatfunktion definieren\ndef summe_groesser200(array):\n return sum([x for x in array if x>200])\n\ngx.agg(summe_groesser200)\n\n# Eigene Aggregatfunktion mit Parameter\ndef summe_groesser(array, threshold):\n return sum([x for x in array if x>threshold])\n\ngx.agg(summe_groesser, threshold=200)\n\n# Iterieren über Gruppen\nfor name, group in gx:\n print(name)\n print(group)\n\n# Gruppen als Dictionary\ndict(list(gx))\n\n# Transformation mit Funktionsname\ngx.transform('mean')\n\n# Transformation mit Funktion\ngx.transform(len)\n\n# Transformation mit eigener Funktion\ndef normalisieren(array):\n return array - array.mean()\n\ngx.transform(normalisieren)\n\n# Beliebige Funktion anwenden\ndef erste_zwei(df):\n return df.head(2)\n\ngx.apply(erste_zwei)\n" }, { "alpha_fraction": 0.7674023509025574, "alphanum_fraction": 0.7775891423225403, "avg_line_length": 29.947368621826172, "blob_id": "f0aab0cf9ecd5f7a2f310ac491b7f46ef894d8f1", "content_id": "c47d9ba1e58bb598514809494b0a81184bfab4c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 598, "license_type": "no_license", "max_line_length": 150, "num_lines": 19, "path": "/datenvisualisierung/mehrere_korrelationen.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Mehrere Korrelationen plotten\n\nIn dieser Übung werden wir mehrere Scatterplots in ein Diagramm zeichnen.\n\n### Schritt 1\n\nLade 2-3 weitere Datensätze von [www.gapminder.org](http://www.gapminder.org) herunter.\n\n### Schritt 2\n\nErstelle nach dem Schema der ersten Übung mehrere 
Scatterplots.\n\n### Schritt 3\n\nLies das Programm `multipanel.py`. Passe Dein Programm so an, daß das fertige Diagramm vier Scatterplots enthält.\n\n### Schritt 4\n\nLies das Programm `streudiagramm.py`. Passe Dein Programm so an, daß die Größe der Kugeln eine weitere Variable aus den Gapminder-Daten repräsentiert.\n" }, { "alpha_fraction": 0.706204354763031, "alphanum_fraction": 0.7189781069755554, "avg_line_length": 17.827587127685547, "blob_id": "1a231fc88a4b780ecbdea77ddcf80b5a4aa794a6", "content_id": "fd8fd9210b4fec5ba7553b07c7a23ebb73e28ba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 69, "num_lines": 29, "path": "/datenaufbereitung/gapminder.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pandas as pd\n\n#\n# Dateien einlesen\n#\ncountries = pd.read_csv(\"countries.csv\", index_col=0)\n\nfertility = pd.read_csv(\"gapminder_total_fertility.csv\", index_col=0)\n\npop = pd.read_excel('gapminder_population.xlsx', index_col=0)\n\nincome = pd.read_json(\"gapminder_gdp_per_capita.json\")\n\n#\n# Vereinigen\n#\npop2 = pop.stack()\npop2.name = 'population'\n\nfert2 = fertility.stack()\nfert2.name = 'fertility'\n\n# compare with\npop.transpose().stack()\n\npop.merge(fertility, 'outer', left_index=True, right_index=True)\n\n\ncountries.to_clipboard(index=False)\n\n" }, { "alpha_fraction": 0.7051070928573608, "alphanum_fraction": 0.7166392207145691, "avg_line_length": 18.54838752746582, "blob_id": "5df40aeac34c05cad20e56d9c0fe15fc7861515e", "content_id": "f54e33f144439159e499abb19c8aa590173a933b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 613, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/datenaufbereitung/verbinden_aufteilen.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# DataFrames verbinden und aufteilen\n\n### Problem: Zwei DataFrames vertikal zusammenführen\n\n concat\n concat (axis, partial)\n\n### Problem: Zwei DataFrames horizontal zusammenführen\n\n merge\n merge, Optionen\n\n### Problem: Spalten sollen andere Namen bekommen\n df.add_prefix('bla_')\n df.add_suffix('_bla')\n\n### Problem: Spalte befüllen\n broadcasting\n merge diff (p.133)\n\n### Problem: Schnittmenge aus 2 Data Frames\n p.126\n\n### Problem: Daten in gleich große Klassen (bins) einteilen\n cut\n qcut\n\n### Problem: langes in breites Format überführen\n stack\n unstack\n Optionen von stack/unstack\n" }, { "alpha_fraction": 0.6529080867767334, "alphanum_fraction": 0.705440878868103, "avg_line_length": 15.136363983154297, "blob_id": "45d541462ef906c3ede845dc1085026b31a6a133", "content_id": "1c2d1cdb7c5028b8f567b4d86052e53335c736f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1067, "license_type": "no_license", "max_line_length": 67, "num_lines": 66, "path": "/pandas_grundlagen/step_by_step.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pandas as pd\n\nlaender = pd.read_csv('grosse_laender_2015.csv', index_col=0)\n\n# ---- Info für Karten ----\n\nlaender.values\n\n# siehe (p.125)\nlaender.index\n\ndf = laender.transpose()\n\nger = pd.Series([80000000, 1.4], index=['population', 'fertility'])\n\ndef anfangsbuchstabe(s): return s[0]\nlaender['initial'] = 
laender['continent'].apply(anfangsbuchstabe)\n\nlaender.index\n\nlaender['continent']\n\nlaender['population'] > 200000000\n\nlaender[laender['population'] > 200000000]\n\n'Russia' in df\n\nlaender['fertility'] * 1.5\n\nlaender['population'] + 1000000\n\nlaender[['population', 'continent']]\n\nlaender.iloc[3:7]\n\n# p.143/144\nlaender.describe()\n\nlaender['population'].mean()\n\nlaender['population'].sum()\n\nlaender.cumsum()\n\nlaender.head(3)\n\nlaender.tail(3)\n\nlaender.shape\n\nimport pylab as plt\nlaender.plot('population', 'fertility', style='ro')\nplt.savefig('pop.png')\n\nlaender['continent'].value_counts()\n\nlaender.groupby('continent')['population'].sum()\n\nlaender.stack()\n\nlaender.sort_values(['continent', 'fertility'])\n\nlaender['continent'].unique()\n\ndel laender['fertility']\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 17.25, "blob_id": "f41f7e5be139230a0040239755f2a5a0a243c625", "content_id": "bb5e81910797fc62440ca443dc36f3f2622cb394", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 149, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/datenaufbereitung/README.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Datenaufbereitung\n\n## Ziele:\n\n* Import / Export von Daten\n* Zeilen und Spalten auswählen\n* DataFrames verbinden und aufteilen\n* Werte verändern\n" }, { "alpha_fraction": 0.7638773918151855, "alphanum_fraction": 0.7667771577835083, "avg_line_length": 23.42718505859375, "blob_id": "1ee5de77e88ce6098f78e8829c61e816f38687a5", "content_id": "262d8c0a415c30254b9d680bd3238be960c8d75d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2414, "license_type": "no_license", "max_line_length": 260, "num_lines": 103, "path": "/README_EN.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Data Analysis in Python\n\n## Outcome\n\nAfter this course you will be able to process, summarize and visualize tabular data efficiently using the `pandas` library.\n\n## Target Audience\n\nAnalysts, researchers and engineers who would like to handle larger data sets more efficiently.\n\n## Prerequisites\n\nBasic knowledge of Python\n\n## Course Description\n\nThe `pandas` Python library is a practical everyday tool for the analysis of tabular data. \nThis course improves your skillset for working with datasets ranging from a few dozen to several million entries in Python. The course uses hands-on examples to cover exploratory data analysis, extracting relevant summaries and creating attractive diagrams. 
\nThe integration of `pandas` with interactive environments like IPython and Jupyter will allow you to support answers to many questions with data quickly.\n\n## Course Duration\n\n14 hours\n\n## Course Outline\n\n| Day 1 | Day 2 |\n|-------|-------|\n| Introduction to pandas | Aggregation |\n| Data Wrangling | Analyzing Time Series |\n| Summarizing Data | Geographical Data |\n| Data Visualization | pandas Best Practices |\n\n## Day 1\n\n### Introduction to pandas\n\n* Your environment for interactive data analysis\n* overview of the `pandas` library\n* Series\n* DataFrames\n* Improvements in Python 3\n* Jupyter Notebooks\n\n\n### Data Wrangling\n\n* reading CSV- and Excel files to `pandas`\n* sorting data\n* transposing tables\n* selecting rows and columns\n* saving `pandas`-tables\n\n### Summarizing data\n\n* extracting statistical metrics\n* merging tables\n* hierarchical indexing\n* crosstables\n* pivot tables\n\n### Data Visualization\n\n* creating diagrams with `matplotlib`\n* using `matplotlib` from within `pandas`\n* visualizing data in Jupyter notebooks\n* heatmaps\n* multi-panel diagrams\n* creating high-quality figures\n* other libraries for visualizing data\n\n## Day 2\n\n### Aggregation\n\n* iterating rows and columns\n* grouping\n* aggregation functions\n* transformation functions\n* applying your own functions\n\n### Analyzing Time Series\n\n* series of timestamps\n* rescaling time series\n* changing timezones\n* handling data with gaps\n* rolling means\n* simple predictions\n\n### Geographical Data\n\n* storing coordinates in `pandas`\n* drawing maps with `Basemap`\n\n### Best Practices\n\n* myths and facts\n* Numpy\n* machine learning models in scikit-learn\n* alternative libraries and modeling strategies\n* handling huge datasets\n* do's and don'ts\n" }, { "alpha_fraction": 0.5988957285881042, "alphanum_fraction": 0.601169228553772, "avg_line_length": 23.496183395385742, "blob_id": "cd2013ce8316a5486966451c5e9b3b4956ccb0cc", "content_id": "3bf7511d9752c78e193936e451c5e9b3b4956ccb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 3079, "license_type": "no_license", "max_line_length": 72, "num_lines": 131, "path": "/README_EN.rst", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "Data Analysis in Python\n=======================\n\nOutcome\n-------\n\nAfter this course you will be able to process, summarize and visualize\ntabular data efficiently using the ``pandas`` library.\n\nTarget Audience\n---------------\n\nAnalysts, researchers and engineers who would like to handle larger data\nsets more efficiently.\n\nPrerequisites\n-------------\n\nBasic knowledge of Python\n\nCourse Description\n------------------\n\nThe ``pandas`` Python library is a practical everyday tool for the\nanalysis of tabular data. This course improves your skillset for working\nwith datasets ranging from a few dozen to several million entries in\nPython. The course uses hands-on examples to cover exploratory data\nanalysis, extracting relevant summaries and creating attractive\ndiagrams. 
The integration of ``pandas`` with interactive environments\nlike IPython and Jupyter will allow you to support answers to many\nquestions with data quickly.\n\nCourse Duration\n---------------\n\n14 hours\n\nCourse Outline\n--------------\n\n+--------------------------+-------------------------+\n| Day 1 | Day 2 |\n+==========================+=========================+\n| Introduction to pandas | Aggregation |\n+--------------------------+-------------------------+\n| Data Wrangling | Analyzing Time Series |\n+--------------------------+-------------------------+\n| Summarizing Data | Geographical Data |\n+--------------------------+-------------------------+\n| Data Visualization | pandas Best Practices |\n+--------------------------+-------------------------+\n\nDay 1\n-----\n\nIntroduction to pandas\n~~~~~~~~~~~~~~~~~~~~~~\n\n- Your environment for interactive data analysis\n- overview of the ``pandas`` library\n- Series\n- DataFrames\n- Improvements in Python 3\n- Jupyter Notebooks\n\nData Wrangling\n~~~~~~~~~~~~~~\n\n- reading CSV- and Excel files to ``pandas``\n- sorting data\n- transposing tables\n- selecting rows and columns\n- saving ``pandas``-tables\n\nSummarizing data\n~~~~~~~~~~~~~~~~\n\n- extracting statistical metrics\n- merging tables\n- hierarchical indexing\n- crosstables\n- pivot tables\n\nData Visualization\n~~~~~~~~~~~~~~~~~~\n\n- creating diagrams with ``matplotlib``\n- using ``matplotlib`` from within ``pandas``\n- visualizing data in Jupyter notebooks\n- heatmaps\n- multi-panel diagrams\n- creating high-quality figures\n- other libraries for visualizing data\n\nDay 2\n-----\n\nAggregation\n~~~~~~~~~~~\n\n- iterating rows and columns\n- grouping\n- aggregation functions\n- transformation functions\n- applying your own functions\n\nAnalyzing Time Series\n~~~~~~~~~~~~~~~~~~~~~\n\n- series of timestamps\n- rescaling time series\n- changing timezones\n- handling data with gaps\n- rolling means\n- simple predictions\n\nGeographical Data\n~~~~~~~~~~~~~~~~~\n\n- storing coordinates in ``pandas``\n- drawing maps with ``Basemap``\n\nBest Practices\n~~~~~~~~~~~~~~\n\n- myths and facts\n- Numpy\n- machine learning models in scikit-learn\n- alternative libraries and modeling strategies\n- handling huge datasets\n- do's and don'ts\n\n" }, { "alpha_fraction": 0.7262969613075256, "alphanum_fraction": 0.742397129535675, "avg_line_length": 18.827587127685547, "blob_id": "1a231fc88a4b780ecbdea77ddcf80b5a4aa794a6", "content_id": "fd8fd9210b4fec5ba7553b07c7a23ebb73e28ba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 988, "license_type": "no_license", "max_line_length": 121, "num_lines": 39, "path": "/projekt_songtexte/teil4_songs_einlesen.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Teil 4: Songs einlesen\n\n*Diesen Teil kannst Du bequem bearbeiten, wenn das andere Programm noch im Hintergrund mit Herunterladen beschäftigt ist.*\n\n## Aufgabe 4.1\n\nErstelle ein neues Programm `songs_einlesen.py`.\n\n## Aufgabe 4.2\n\nGib die Namen aller Songdateien aus. Verwende das Modul `os`:\n\n import os\n for dateiname in os.listdir(PFAD):\n print(dateiname)\n\n## Aufgabe 4.3\n\nBetrachte eine Songdatei im Texteditor. Finde heraus, wo der eigentliche Text beginnt und endet.\n\n## Aufgabe 4.4\n\nLies eine Songdatei als einzelnen String ein. Verwende dazu `read()`:\n\n text = open(dateiname).read()\n\nSchneide den Songtext aus. 
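Eine minimale Skizze dazu (die Markierungen `<pre>` und `</pre>` sind hier nur Annahmen über das Seitenformat; prüfe die echten Begrenzungen in Deiner Datei):\n\n start = text.find('<pre>') + len('<pre>')\n ende = text.find('</pre>', start)\n songtext = text[start:ende]\n\n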
Dies geht recht gut mit Hilfe von `text.find()`.\n\n## Aufgabe 4.5\n\nVerpacke das Einlesen eines Songs in eine Funktion, die den Text als String zurückgibt.\n\n## Aufgabe 4.6\n\nLies alle Songs in eine Liste von Strings ein. Verpacke auch diesen Code in eine Funktion:\n\n def songtexte_auslesen(verzeichnis):\n # Dein Code\n return textliste\n\n" }, { "alpha_fraction": 0.7874659299850464, "alphanum_fraction": 0.7874659299850464, "avg_line_length": 14.166666984558105, "blob_id": "8381b1054976f3973ff1b2f98820180b13c6bc1d", "content_id": "d724bc48e167e7b5c995604f105b0c2e62c67dfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 370, "license_type": "no_license", "max_line_length": 44, "num_lines": 24, "path": "/zeitreihen/README.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Analyse von Zeitreihen\n\n**Beispiel: Flugzeugabstürze oder TerrorDB**\n\nMETHODE: Schritt-für-Schritt-Anleitung\n\n## Serien von Datumsstempeln\ndate_range\nPerioden\n\n## Umskalieren von Zeitreihen\nupsampling\ndownsampling\n\n## Anpassen von Zeitzonen\n\n## rollender Durchschnitt\n\nmoving average\nrolling_* Funktionen\n\n## Umgang mit lückenhaften Daten\n\n## einfache Prognosen\n\n\n" }, { "alpha_fraction": 0.6987704634666443, "alphanum_fraction": 0.7144808769226074, "avg_line_length": 26.101852416992188, "blob_id": "ad160a4419d6f6d7c943425cf89b6207f6ceab1b", "content_id": "8ded4148f8d0f9a485f8ce18b548eafc841c3720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2943, "license_type": "no_license", "max_line_length": 227, "num_lines": 108, "path": "/projekt_songtexte/teil5_vorhersage.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Teil 5: Vorhersage\n\nHier werden wir ein Standard-Kochrezept nachbauen (siehe [http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html](http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html)).\n\n## Aufgabe 5.1\n\nErstelle ein neues Modul `vorhersage.py`.\n\nImportiere die Funktion zum Einlesen aus dem Modul `songs_einlesen.py` ein:\n\n from songs_einlesen import songtexte_auslesen\n\n## Aufgabe 5.2\n\nBereite die Daten zur Vorhersage vor, indem Du in `X` die Liste mit Songtexten sammelst, in `y` die Namen der Interpreten. Zum Beispiel:\n\n X = madonna + eminem\n y = ['madonna'] * len(madonna) + ['eminem'] * len(eminem)\n\nStelle sicher, daß X und y gleich lang sind.\n\n## Aufgabe 5.3\n\nImportiere ein paar Sachen aus `scikit-learn`:\n\n from sklearn.feature_extraction.text import CountVectorizer\n from sklearn.feature_extraction.text import TfidfTransformer\n from sklearn.naive_bayes import MultinomialNB\n from sklearn.pipeline import Pipeline\n from sklearn import model_selection\n\n## Aufgabe 5.4\n\nZerlege den Datensatz in Trainings- und Testdaten. 
Setze für `ANTEIL_TESTDATEN` eine Zahl zwischen 0 und 1 ein:\n\nXtrain, Xtest, ytrain, ytest = \\\n model_selection.train_test_split(X, y, test_size=ANTEIL_TESTDATEN)\n\n## Aufgabe 5.5\n\nBaue das Modell:\n\n model = Pipeline([\n ('vectorizer', CountVectorizer(min_df=3, ngram_range=(1, 1))),\n ('tfidf_transformer', TfidfTransformer()),\n ('bayes_model', MultinomialNB(alpha=1.0)),\n ])\n model.fit(Xtrain, ytrain)\n\n## Aufgabe 5.6\n\nGib die Anzahl vektorisierter Wörter aus:\n\n vect = model.named_steps['vectorizer']\n print(len(vect.vocabulary_))\n\n## Aufgabe 5.7\n\nWerte das Modell aus:\n\n print(\"Genauigkeit: \", model.score(Xtest, ytest))\n\nWerte das Modell auch für den Testdatensatz aus. Wie bewertest Du das Ergebnis?\n\n## Aufgabe 5.8\n\nVerändere den Parameter `alpha`. Wie verändert sich die Vorhersage?\n\n## Aufgabe 5.9\n\nFühre eine 10-fache Kreuzvalidierung durch:\n\n print(model_selection.cross_val_score(model, X, y, \n cv=10, scoring='accuracy'))\n\n\n## Aufgabe 5.10\n\nFühre eine Vorhersage durch:\n\n model.predict(\"take the 8mile road in detroit\")\n\n## Aufgabe 5.11\n\nGib typische Wörter für die verglichenen Künstler aus:\n\n import numpy as np\n names = np.array(model.named_steps['vectorizer'].get_feature_names())\n\n coef = model.named_steps['bayes_model'].coef_\n coef = coef.reshape((len(names),))\n\n # Top-Wörter für 1. Interpreten\n indices = (-coef).argsort()[:20].tolist()\n print(names[indices])\n\n # Top-Wörter für 2. Interpreten\n indices = (coef).argsort()[:20].tolist()\n print(names[indices])\n\n## Aufgabe 5.11\n\nProbiere unterschiedliche Optionen aus:\n\n* variiere den Anteil der Testdaten\n* variiere `min_df` beim `CountVectorizer`\n* gib beim `CountVectorizer` die Option `stop_words='english'`\n* variiere `ngram_range` beim `CountVectorizer`\n" }, { "alpha_fraction": 0.7139534950256348, "alphanum_fraction": 0.7162790894508362, "avg_line_length": 16.15999984741211, "blob_id": "937c23f7786e46f709da9c35a20a917ec0b2a1d7", "content_id": "bd391ba8731cbd6cc75bdccbb25b3da98e212a75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 437, "license_type": "no_license", "max_line_length": 56, "num_lines": 25, "path": "/datenaufbereitung/zeilen_spalten_auswaehlen.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Zeilen und Spalten auswählen\n\n### Problem: bestimmte Zeilen auswählen\n\n### Problem: doppelte Einträge\n duplicated\n drop_duplicates\n unique\n\n### Problem: Ausreißer filtern\n df[df['x'] <= y0]\n\n### Problem: Bestehende Spalte als neuen Index verwenden\n reindex\n rename\n\n### Problem: Fehlende Werte löschen\n isnull\n dropna\n\n### Problem: bestimmte Zeilen löschen\n drop\n\n### Problem: bestimmte Spalten löschen\n delete col\n" }, { "alpha_fraction": 0.4201604127883911, "alphanum_fraction": 0.4329201579093933, "avg_line_length": 12.37804889678955, "blob_id": "fb9e0ef30e62950557c1aafcaa6b8fab71cdf6da", "content_id": "f2a70e2a041cf338aab7a0ad375cbde0ded67a55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 5511, "license_type": "no_license", "max_line_length": 66, "num_lines": 410, "path": "/cheatsheet/cheatsheet.rst", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "pandas Cheat Sheet\n==================\n\nEinstieg\n--------\n\npandas importieren\n~~~~~~~~~~~~~~~~~~\n\n::\n\n import pandas as pd\n\nSeries 
erstellen\n~~~~~~~~~~~~~~~~\n\n::\n\n s = pd.Series([1, 2, 3], index=['A', 'B', 'C'], name='col1')\n\nDataFrame erstellen\n~~~~~~~~~~~~~~~~~~~\n\n::\n\n data = [[1, 4], [2, 5], [3, 6]]\n index = ['A', 'B', 'C']\n df = pd.DataFrame(data, index=index, columns=['col1', 'col2'])\n\nDataFrame laden\n~~~~~~~~~~~~~~~\n\n::\n\n df = pd.read_csv('dateiname.csv', \n sep=',', \n names=['col1', 'col2'], \n index_col=0, \n encoding='utf-8',\n nrows=3)\n\nZeilen und Spalten indizieren\n-----------------------------\n\neine Spalte auswählen\n~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df['col1']\n\nmehrere Spalten auswählen\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df[['col1', 'col2']]\n\nerste n Zeilen anzeigen\n~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.head(2)\n\nletzte n Zeilen anzeigen\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.tail(2)\n\nZeilen nach Index-Werten auswählen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.loc['A']\n df.loc[['A', 'B']]\n\nZeilen nach Position auswählen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.loc[1]\n df.loc[1:]\n\nDatenaufbereitung\n-----------------\n\nnach Werten filtern\n~~~~~~~~~~~~~~~~~~~\n\n::\n\n df[df['col1'] > 1]\n\nnach Spalten sortieren\n~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.sort_values(['col2', 'col2'], ascending=[False, True])\n\ndoppelte Zeilen identifizieren\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.duplicated()\n\neindeutige Zeilen finden\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df['col1'].unique()\n\nSpalten und Zeilen vertauschen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df = df.transpose()\n\neine Spalte löschen\n~~~~~~~~~~~~~~~~~~~\n\n::\n\n del df['col2']\n\nganzes DataFrame kopieren\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n kopie = df.copy()\n\nMehrere DataFrames vertikal verketten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df2 = df + 10\n pd.concat([df, df2])\n\nDataFrames horizontal verbinden\n-------------------------------\n\n::\n\n df3 = pd.DataFrame([[1, 7], [8, 9]], \n index=['B', 'D'], \n columns=['col1', 'col3'])\n\nnur komplett definierte Zeilen (INNER JOIN)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.merge(df3)\n\nlinke Spalte bleibt vollständig (LEFT OUTER JOIN)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.merge(df3, how='left')\n\nrechte Spalte bleibt vollständig (RIGHT OUTER JOIN)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.merge(df3, how='right')\n\nalle Einträge vollständig (OUTER JOIN)\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.merge(df3, how='outer')\n\nZeilen über Indices zusammenführen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.merge(df3, left_index=True, right_index=True\n\nunbesetzte Werte auffüllen oder löschen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.fillna(0.0)\n df.dropna()\n\neigene Funktion anwenden\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n def func(x): return 2**x\n df.apply(func)\n\nArithmetik und Statistik\n------------------------\n\nAddition zu allen Werten\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df + 10\n\nSumme über Spalten\n~~~~~~~~~~~~~~~~~~\n\n::\n\n df.sum()\n\nkumulative Summe über Spalten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.cumsum()\n\nMittelwert über Spalten\n~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.mean()\n\nStandardabweichung über Spalten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.std()\n\nHäufigkeit aller Werte ausgeben\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df['col1'].value_counts()\n\nDeskriptive Statistiken für Spalten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.describe()\n\nhierarchische 
Indizierung\n-------------------------\n\nhierarchischen Index erstellen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.stack()\n\nhierarchischen Index auflösen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.unstack()\n\nAggregation\n-----------\n\nGruppen bilden\n~~~~~~~~~~~~~~\n\n::\n\n g = df.groupby('col1')\n\nüber Gruppen iterieren\n~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n for i, group in g:\n print(i, group)\n\nGruppen aggregieren\n~~~~~~~~~~~~~~~~~~~\n\n::\n\n g.sum()\n g.prod()\n g.mean()\n g.std()\n g.describe()\n\nSpalten aus Gruppen auswählen\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n g['col2'].sum()\n g[['col2', 'col3']].sum()\n\neigene Funktion auf jede Gruppe anwenden\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n def strsum(group):\n return ''.join([str(x) for x in group.values])\n g['col2'].apply(strsum)\n\nDatenexport\n-----------\n\nDaten als NumPy-Array\n~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.values\n\nDaten als CSV-Datei speichern\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.to_csv('ausgabe.csv', sep=\",\")\n\nDataFrame als Tabellen-String formatieren\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.to_string()\n\nDataFrame zu Dictionary konvertieren\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.to_dict()\n\nVisualisierung\n--------------\n\n::\n\n import pylab as plt\n plt.figure()\n\nStreudiagramm\n~~~~~~~~~~~~~\n\n::\n\n df.plot.scatter('col1', 'col2', style='ro')\n\nBalkendiagramm\n~~~~~~~~~~~~~~\n\n::\n\n df.plot.bar(x='col1', y='col2', width=0.7)\n\nFlächendiagramm\n~~~~~~~~~~~~~~~\n\n::\n\n df.plot.area(stacked=True, alpha=1.0)\n\nBox-and-Whisker-Plot\n~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.plot.box()\n\nHistogramm über eine Spalte\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df['col1'].plot.hist(bins=3)\n\nHistogramm über alle Spalten\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n df.plot.hist(bins=3, alpha=0.5)\n\nzuletzt generiertes Diagramm speichern\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n::\n\n plt.savefig('pop.png')\n\n" }, { "alpha_fraction": 0.732292890548706, "alphanum_fraction": 0.7478991746902466, "avg_line_length": 19.825000762939453, "blob_id": "7d3dc8175bb52eb3b8c205601f106609cf5d9b44", "content_id": "6845588d40bdc973c7eef5832b2897fe719003cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 838, "license_type": "no_license", "max_line_length": 226, "num_lines": 40, "path": "/pandas_grundlagen/README.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Einstieg in pandas\n\n![Kurzübersicht zu `pandas`: die meisten Daten in diesem Kurs sind als Tabellen vom Typ pd.DataFrame abgelegt. DataFrames bestehen aus einem Index und mehreren Spalten. 
Jede Spalte hat den Typ pd.Series](pandas_uebersicht.png)\n\n## Die Arbeitsumgebung zur interaktiven Datenanalyse\n\n### Aufgabe 1\n\nStarte die IPython-Konsole über Anaconda.\n\n### Aufgabe 2\n\nImportiere pandas:\n\n    import pandas as pd\n\n### Aufgabe 3\n\nLade Daten zur Weltbevölkerung:\n\n    df = pd.read_csv('grosse_laender_2015.csv', index_col=0)\n\n### Aufgabe 4\n\nProbiere die 20 Pandas-Befehle von den Karten aus.\n\n### Aufgabe 5\n\nGruppiert die Befehle gemeinsam so, wie Ihr es für sinnvoll haltet.\n \n\n## Extras\n\n* Jupyter Notebooks\n* Neuerungen in Python 3\n\n\n### Quelle\n\nDaten zur Verfügung gestellt von [www.gapminder.org](http://www.gapminder.org)" }, { "alpha_fraction": 0.6011627912521362, "alphanum_fraction": 0.6234496235847473, "avg_line_length": 12.127226829528809, "blob_id": "373111ddc1e64e4b704bfb130e52f2c2ef0ec82b", "content_id": "92aedd08e67dc17e51ea3c068f735a44da7631a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5188, "license_type": "no_license", "max_line_length": 62, "num_lines": 393, "path": "/cheatsheet/cheatsheet.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# pandas Cheat Sheet\n\n## Einstieg\n\n### pandas importieren\n\n```python\nimport pandas as pd\n```\n\n### Series erstellen\n\n```python\ns = pd.Series([1, 2, 3], index=['A', 'B', 'C'], name='col1')\n```\n\n### DataFrame erstellen\n\n```python\ndata = [[1, 4], [2, 5], [3, 6]]\nindex = ['A', 'B', 'C']\ndf = pd.DataFrame(data, index=index, columns=['col1', 'col2'])\n```\n\n### DataFrame laden\n\n```python\ndf = pd.read_csv('dateiname.csv', \n                 sep=',', \n                 names=['col1', 'col2'], \n                 index_col=0, \n                 encoding='utf-8',\n                 nrows=3)\n```\n\n## Zeilen und Spalten indizieren\n\n### eine Spalte auswählen\n\n```python\ndf['col1']\n```\n\n### mehrere Spalten auswählen\n\n```python\ndf[['col1', 'col2']]\n```\n \n### erste n Zeilen anzeigen\n\n```python\ndf.head(2)\n```\n\n### letzte n Zeilen anzeigen\n\n```python\ndf.tail(2)\n```\n\n### Zeilen nach Index-Werten auswählen\n\n```python\ndf.loc['A']\ndf.loc[['A', 'B']]\n```\n\n### Zeilen nach Position auswählen\n\n```python\ndf.iloc[1]\ndf.iloc[1:]\n```\n\n## Datenaufbereitung\n\n### nach Werten filtern\n\n```python\ndf[df['col1'] > 1]\n```\n\n### nach Spalten sortieren\n\n```python\ndf.sort_values(['col1', 'col2'], ascending=[False, True])\n```\n\n### doppelte Zeilen identifizieren\n\n```python\ndf.duplicated()\n```\n\n### eindeutige Zeilen finden\n\n```python\ndf['col1'].unique()\n```\n\n### Spalten und Zeilen vertauschen\n\n```python\ndf = df.transpose()\n```\n\n### eine Spalte löschen\n\n```python\ndel df['col2']\n```\n\n### ganzes DataFrame kopieren\n\n```python\nkopie = df.copy()\n```\n\n### Mehrere DataFrames vertikal verketten\n\n```python\ndf2 = df + 10\npd.concat([df, df2])\n```\n\n## DataFrames horizontal verbinden\n\n```python\ndf3 = pd.DataFrame([[1, 7], [8, 9]], \n\t               index=['B', 'D'], \n\t               columns=['col1', 'col3'])\n```\n\n### nur komplett definierte Zeilen (INNER JOIN)\n\n```python\ndf.merge(df3)\n```\n\n### linke Spalte bleibt vollständig (LEFT OUTER JOIN)\n\n```python\ndf.merge(df3, how='left')\n```\n\n### rechte Spalte bleibt vollständig (RIGHT OUTER JOIN)\n\n```python\ndf.merge(df3, how='right')\n```\n\n### alle Einträge vollständig (OUTER JOIN)\n\n```python\ndf.merge(df3, how='outer')\n```\n\n### Zeilen über Indices zusammenführen\n\n```python\ndf.merge(df3, left_index=True, right_index=True)\n```\n\n### unbesetzte Werte auffüllen oder 
löschen\n\n```python\ndf.fillna(0.0)\ndf.dropna()\n```\n\n### eigene Funktion anwenden\n\n```python\ndef func(x): return 2**x\ndf.apply(func)\n```\n\n## Arithmetik und Statistik\n\n### Addition zu allen Werten \n\n```python\ndf + 10\n```\n\n### Summe über Spalten\n\n```python\ndf.sum()\n```\n\n### kumulative Summe über Spalten\n\n```python\ndf.cumsum()\n```\n\n### Mittelwert über Spalten\n\n```python\ndf.mean()\n```\n\n### Standardabweichung über Spalten\n\n```python\ndf.std()\n```\n\n### Häufigkeit aller Werte ausgeben\n\n```python\ndf['col1'].value_counts()\n```\n\n### Deskriptive Statistiken für Spalten\n\n```python\ndf.describe()\n```\n\n## hierarchische Indizierung\n\n### hierarchischen Index erstellen\n\n```python\ndf.stack()\n```\n\n### hierarchischen Index auflösen\n\n```python\ndf.unstack()\n```\n\n## Aggregation\n\n### Gruppen bilden\n\n```python\ng = df.groupby('col1')\n```\n\n### über Gruppen iterieren\n\n```python\nfor i, group in g:\n    print(i, group)\n```\n\n### Gruppen aggregieren\n\n```python\ng.sum()\ng.prod()\ng.mean()\ng.std()\ng.describe()\n```\n\n### Spalten aus Gruppen auswählen\n\n```python\ng['col2'].sum()\ng[['col2', 'col3']].sum()\n```\n\n### Spalten transformieren\n\n    import math\n    g.transform(math.log)\n\n### eigene Listenfunktion auf jede Gruppe anwenden\n\n```python\ndef strsum(group):\n    return ''.join([str(x) for x in group.values])\ng['col2'].apply(strsum)\n```\n\n## Datenexport\n\n### Daten als NumPy-Array\n\n```python\ndf.values\n```\n\n### Daten als CSV-Datei speichern\n\n```python\ndf.to_csv('ausgabe.csv', sep=\",\")\n```\n\n### DataFrame als Tabellen-String formatieren\n\n```python\ndf.to_string()\n```\n\n### DataFrame zu Dictionary konvertieren\n\n```python\ndf.to_dict()\n```\n\n### DataFrame als Excel-Tabelle speichern\n\n```python\ndf.to_excel('ausgabe.xlsx')\n```\n\n(benötigt Paket `xlwt`)\n\n## Visualisierung\n\n### matplotlib importieren\n\n```python\nimport pylab as plt\n```\n\n### Neues Diagramm beginnen\n\n```python\nplt.figure()\n```\n\n### Streudiagramm\n\n```python\ndf.plot.scatter('col1', 'col2', style='ro')\n```\n\n### Balkendiagramm\n\n```python\ndf.plot.bar(x='col1', y='col2', width=0.7)\n```\n\n### Flächendiagramm\n\n```python\ndf.plot.area(stacked=True, alpha=1.0)\n```\n\n### Box-and-Whisker-Plot\n\n```python\ndf.plot.box()\n```\n\n### Histogramm über eine Spalte\n\n```python\ndf['col1'].plot.hist(bins=3)\n```\n\n### Histogramm über alle Spalten\n\n```python\ndf.plot.hist(bins=3, alpha=0.5)\n```\n\n### Achsenmarkierungen einstellen\n\n    labels = ['A', 'B', 'C', 'D']\n    positions = [1.0, 2.0, 3.0, 4.0]\n    plt.xticks(positions, labels)\n    plt.yticks(positions, labels)\n\n### Zu plottenden Bereich wählen\n    \n    plt.axis([0.0, 2.5, 0.0, 10.0])\n    # [x von, x bis, y von, y bis]\n\n### Diagramm und Achsen beschriften\n\n    plt.title('Korrelation')\n    plt.xlabel('Nunstück')\n    plt.ylabel('Slotermeyer')\n\n### zuletzt generiertes Diagramm speichern\n\n    plt.savefig('plot.png')\n    plt.savefig('plot.png', dpi=300)\n    plt.savefig('plot.svg')\n" }, { "alpha_fraction": 0.6898339986801147, "alphanum_fraction": 0.7271783947944641, "avg_line_length": 30.064516067504883, "blob_id": "84013b11541ce28d055e405e7527fe2c59458d95", "content_id": "1cfe6201e24cb71777c33a92aafd0c6a633b4d47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 970, "license_type": "no_license", "max_line_length": 181, 
"num_lines": 31, "path": "/projekt_songtexte/teil1_songliste.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Teil 1: Songliste herunterladen\n\n## Aufgabe 1.1\n\nBesuche im Browser die Seite [azlyrics.com](http://www.azlyrics.com). Suche Dir einen beliebigen Interpreten aus (außer **Madonna**, **Eminem** und **Beatles**, die habe ich schon).\n\nNotiere Dir die URL.\n\n## Aufgabe 1.2\n\nErstelle eine Python-Datei `download.py`.\n\n## Aufgabe 1.3\n\nVerwende das Modul `requests`, um die Seite mit der Songliste des gewählten Interpreten herunterzuladen.\n\nDamit das funktioniert, müssen wir so tun als wäre Python ein Browser. Das geht mit:\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)\n AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n seite = requests.get(url, headers=headers)\n\n## Aufgabe 1.4\n\nSpeichere die heruntergeladene Seite in einer HTML-Datei.\n\n#### Hinweis:\n\nWenn in der Seite bestimmte Sonderzeichen vorkommen, mußt Du die Datei zum Schreiben öffnen mit:\n\n f = open(dateiname, 'w', encoding='utf-8')\n" }, { "alpha_fraction": 0.7501769065856934, "alphanum_fraction": 0.7558386325836182, "avg_line_length": 18.901409149169922, "blob_id": "58038e5491a29783997389dfbe0897410261d8df", "content_id": "0c3c9402d57807ca5f49338657772156a3dac8ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1423, "license_type": "no_license", "max_line_length": 71, "num_lines": 71, "path": "/leftovers.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "## Leftovers\n\n\n## Beispieldatensätze\n\n* Gapminder-Datensatz (Grundlagen + Datenaufbereitung + Visualisierung)\n* imdb (Aggregation)\n* Flugzeugabstürze ODER Terrorismusdatenbank (Zeitreihen + Geo)\n\n## Lizenz\n\nCC-BY-SA\n\n## Danksagungen\n\n* Dinu Gherman für den Anstoß zum Entwickeln der pandas-100 Lernkarten\n\n\n### Problem: Bestehende Spalte als zusätzliche Ebene in den Index\n merge + concat mit hierarchischer Indizierung\n stack\n unstack\n Optionen: level, on rows, on cols\n sortlevel\n swaplevel\n\n### Aufgabe 5\n\nErstelle ein neues DataFrame\n\n erstellen aus: Series, dict, list\n erstellen, Optionen: index, columns (p.124)\n\n## Kreuztabellen \n* Kreuztabellen\n crosstab\n* Pivot-Tabellen\n pivot_table\n\n\n * %matplotlib inline\n* Arten von Diagrammen\n * bar\n * scatterplot\n * heatmap\n * multi-panel figures\n* qualitativ hochwertige Diagramme generieren\n* matplotlib gallery\nandere: Bokeh, D3.js, seaborn, http://home.gna.org/veusz/\n\n### NICHT MACHEN\n\n* Panel\n* regex aus pandas\n* permutation\n* random sampling\n* Bäume in pandas\n* Taxonomie NCBI\n* Suche in geographischen Daten (kann pandas nicht!)\n\n### Einblick in maschinelles Lernen\n\n* was kann maschinelles Lernen und was nicht?\n* Modellbildung in scikit-learn\n* Validierung von Modellen\n* Fallstricke\n* Umgang mit großen Datensätzen\n* weitere Python-Pakete zur Datenmodellierung\n\n* Standardbeispiel zu Scikit-learn\n* Grimms Märchen Volltext (für scikit-learn)\n" }, { "alpha_fraction": 0.7102040648460388, "alphanum_fraction": 0.7170068025588989, "avg_line_length": 16.023256301879883, "blob_id": "e31f70e48a1a3f73edabbb416d33995ff04195c5", "content_id": "e82fbb917056ece88588a0d3c819ec27fb6ad085", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 738, "license_type": "no_license", 
"max_line_length": 55, "num_lines": 43, "path": "/datenaufbereitung/werte_veraendern.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Werte verändern\n\n### Problem: Fehlende Daten mit anderem DF ausbessern\n combine_first\n\n### Problem: Werte durch andere ersetzen\n replace\n\n### Problem: Fehlende Werte füllen\n fillna\n\n### Problem: 0 / 1 Matrix erstellen\n get_dummies (p.212)\n\n### Problem: Werte aus dictionary einsetzen\n add mapping, df.map\n\n### Problem: Funktion auf Spalte anwenden\n apply\n applymap\n\n\n### Problem: DataFrame kopieren\n\n df.copy\n\n### Problem: neue Spalte hinzufügen\n\n Zuweisung: new column, range, with gaps\n\n### Problem: DataFrame oder Index umbenennen\n name\n index.name\n\n### Problem: Sortieren nach einer oder mehreren Spalten\n sort\n\n### Problem: Rangliste erstellen\n ranking\n\n### Problem: Korrelationsmatrix erstellen\n corr\n cov\n\n\n" }, { "alpha_fraction": 0.6188769340515137, "alphanum_fraction": 0.6379928588867188, "avg_line_length": 19.899999618530273, "blob_id": "139e83dd86a9963d7bd243c644d888d41442ba9f", "content_id": "f7085031165ef42369d449f7303914f2d4a219f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "no_license", "max_line_length": 73, "num_lines": 40, "path": "/datenvisualisierung/hexpanda.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pandas as pd\nimport pylab as plt\nimport numpy as np\nimport random\nfrom PIL import Image as im\n\npanda = im.open('panda.png')\npanda = panda.convert('L')\n\nwidth, height = panda.size\ngmap = list(panda.getdata())\ngmap = np.array(gmap)\ngmap = gmap.reshape((height, width))\n\ng = gmap - 255\ng = -1 * g\np = pd.DataFrame(g)\n\npp = p.unstack()\npp = pp[pp > 0]\n\npp.to_csv('panda.csv')\npr = pd.read_csv('panda.csv', names=['x', 'y', 'col'])\npr['y'] *= -1\n\n\ndef sample(val):\n ri = random.randint(1, 1024)\n return ri <= val\n\n\npandasample = pr['col'].apply(sample)\npr = pr[pandasample]\n\npr.plot.hexbin(x='x', y='y', gridsize=24, cmap=plt.get_cmap('Greys'))\nplt.savefig('hexpanda.svg')\n\n#for i in range(1, 50):\n# pr.plot.hexbin(x='x', y='y', gridsize=i, cmap=plt.get_cmap('Greys'))\n# plt.savefig('hexpandas/hexpanda_{}.png'.format(i))\n" }, { "alpha_fraction": 0.43724459409713745, "alphanum_fraction": 0.483946293592453, "avg_line_length": 9.375757217407227, "blob_id": "9f17f73f5e238047c93641036865520f0f24c556", "content_id": "587c39272ee483f38f9c8aefc83db0816aa27162", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1877, "license_type": "no_license", "max_line_length": 67, "num_lines": 165, "path": "/pandas_grundlagen/karten.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# pandas Einführungsübungen\n\n#### 1. bla (★☆☆)\n\n```python\ndf.values\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf = df.transpose()\n```\n\n#### 1. bla (★☆☆)\n\n```python\nger = pd.Series([80000000, 1.4], index=['population', 'fertility'])\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndef anfangsbuchstabe(s): return s[0]\ndf['initial'] = df['continent'].apply(anfangsbuchstabe)\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.index\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['continent']```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['population'] > 200000000\n```\n\n#### 1. 
bla (★☆☆)\n\n```python\ndf[df['population'] > 200000000]\n```\n\n#### 1. bla (★☆☆)\n\n```python\n'Russia' in df\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['population'] / 1000000\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['population'] + 1000000\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf[['population', 'continent']]\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.loc[3:7]\n```\n\n#### 1. bla (★☆☆)\n\n```python\n# p.143/144\ndf.describe()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['population'].mean()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['population'].sum()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.cumsum()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.head(3)\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.tail(3)\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.shape\n```\n\n#### 1. bla (★☆☆)\n\n```python\nimport pylab as plt\ndf.plot('population', 'fertility', style='ro')\nplt.savefig('pop.png')\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['continent'].value_counts()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.groupby('continent')['population'].sum()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.stack()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf.sort_values(['continent', 'fertility'])\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndf['continent'].unique()\n```\n\n#### 1. bla (★☆☆)\n\n```python\ndel df['fertility']\n```\n" }, { "alpha_fraction": 0.7270659804344177, "alphanum_fraction": 0.7427536249160767, "avg_line_length": 25.08108139038086, "blob_id": "7d3dc8175bb52eb3b8c205601f106609cf5d9b44", "content_id": "6845588d40bdc973c7eef5832b2897fe719003cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1325, "license_type": "no_license", "max_line_length": 171, "num_lines": 51, "path": "/projekt_songtexte/teil3_songs_herunterladen.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Teil 3: Songtexte herunterladen\n\n## Aufgabe 3.1\n\nErstelle Dir ein Verzeichnis, in dem Du die Songtexte speichern möchtest.\n\n## Aufgabe 3.2\n\nLösche sämtliche Leer- und Sonderzeichen aus dem Songtitel, um einen Dateinamen zu erhalten.\n\nFüge dem Dateinamen als Endung `.html` hinzu.\n\n## Aufgabe 3.3\n\nNimm **nur den ersten Song der Liste**. Erstelle aus dem Link zu diesem Song eine vollständige URL. \n\n\n## Aufgabe 3.4\n\nLade einen *einzelnen* Song herunter und speichere diesen in einer Textdatei.\n\n## Aufgabe 3.5\n\nVerpacke das Herunterladen eines Songs in einer Funktion:\n\n    def song_herunterladen(titel, link):\n        # Dein Code hierhin\n\nVerwende wie in Aufgabe 1 die `headers`, um einen Browser zu simulieren.\n\n## Aufgabe 3.6\n\nVerwende das Modul `time`, um nach dem Herunterladen und Speichern eines Songs 120 Sekunden zu warten:\n\n    import time\n    time.sleep(120)\n\n**DIES IST DER WICHTIGSTE SCHRITT IN DER HEUTIGEN AUFGABE. 
WENN EINER VON UNS DIESEN VERGISST, KANN ES GANZ LEICHT PASSIEREN, DASS DER SERVER DIE GANZE KLASSE AUSSPERRT.**\n\n## Aufgabe 3.7\n\nVerwende die Funktion `song_herunterladen`, um **die ersten 20 Songs** herunterzuladen.\n\n## Aufgabe 3.8\n\nPrüfe vor dem Herunterladen, ob eine Datei schon existiert:\n\n    import os\n    if os.path.exists(dateiname):\n\nLade nur Dateien herunter, die es noch nicht gibt.\n" }, { "alpha_fraction": 0.7729730010032654, "alphanum_fraction": 0.7729730010032654, "avg_line_length": 21.625, "blob_id": "5a8983c4e92ba694cd55badc81de734dc354f4fe", "content_id": "8405a1219949af7eaafad014ec0a7ce91c5cef6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 186, "license_type": "no_license", "max_line_length": 62, "num_lines": 8, "path": "/geodaten/README.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Geographische Daten\n\nKomplettes end-to-end-Beispiel mit Karten zeichnen zeigen\n(z.B. Flugzeugabstürze)\n\n## Basemap\n\nbasemap kann mit conda installiert werden, aber nicht mit pip.\n\n\n\n" }, { "alpha_fraction": 0.7907965779304504, "alphanum_fraction": 0.79350346326828, "avg_line_length": 23.38679313659668, "blob_id": "9ba73e5507e5fbfb888c607f2816f0940f17c50f", "content_id": "61929e0a9b5a88a336a759b47c4432685373ba5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2602, "license_type": "no_license", "max_line_length": 151, "num_lines": 106, "path": "/README.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Datenanalyse in Python\n\n## Ergebnis\n\nNach diesem Kurs können Sie tabellarische Daten mit Python automatisiert aufbereiten, zusammenfassen und Diagramme erstellen.\n\n## Zielgruppe\n\nAnalysten, Wissenschaftler und Ingenieure, die größere Datenmengen effizienter handhaben möchten.\n\n## Voraussetzungen\n\nGrundkenntnisse in Python\n\n## Kursbeschreibung\n\nDie Python-Bibliothek pandas bietet ein praktisches Alltagswerkzeug zur Analyse tabellarischer Daten. \nDieser Kurs verbessert Ihren Werkzeugsatz für die Arbeit mit Datensätzen von wenigen hundert bis einigen Millionen Einträgen in Python. 
\nDer Kurs behandelt an praktischen Beispielen sowohl die erkundende Datenanalyse als auch das Ermitteln von Kennzahlen und das Erstellen anschaulicher Grafiken.\nDurch die Integration mit interaktiven Analyseumgebungen wie IPython und Jupyter lassen sich viele Fragestellungen schnell umsetzen.\n\n## Kursdauer \n\n14 Stunden\n\n## Agenda\n\n| Tag 1 | Tag 2 |\n|-------|-------|\n| Einführung in pandas | Aggregatfunktionen |\n| Datenaufbereitung | Analyse von Zeitreihen |\n| Daten zusammenfassen | geographische Daten |\n| Datenvisualisierung | pandas in der Praxis |\n\n## Tag 1\n\n### Einführung in pandas\n\n* Die Arbeitsumgebung zur interaktiven Datenanalyse\n* Kurzübersicht zu `pandas`\n* Series\n* DataFrame\n* Neuerungen in Python 3\n* Jupyter Notebooks\n\n\n### Datenaufbereitung\n\n* CSV- und Excel-Dateien in `pandas` einlesen\n* Daten sortieren\n* Daten filtern\n* Tabellen transponieren\n* Auswahl von Zeilen und Spalten\n* `pandas`-Tabellen speichern\n\n### Daten zusammenfassen\n\n* statistische Kenngrößen ermitteln\n* Tabellen zusammenführen\n* hierarchische Indizierung\n* Kreuztabellen\n* Pivot-Tabellen\n\n### Datenvisualisierung\n\n* Diagramme mit `matplotlib` erstellen\n* `matplotlib` aus `pandas` verwenden\n* Daten in Jupyter notebooks visualisieren\n* Heatmaps\n* Multi-Panel-Diagramme\n* qualitativ hochwertige Diagramme generieren\n* andere Bibliotheken zur Datenvisualisierung\n\n## Tag 2\n\n### Aggregatfunktionen\n\n* Iteration über Zeilen und Spalten\n* Gruppieren\n* Aggregieren\n* Transformieren\n* Anwenden eigener Funktionen\n\n### Analyse von Zeitreihen\n\n* Serien von Datumsstempeln\n* Umskalieren von Zeitreihen\n* Anpassen von Zeitzonen\n* Umgang mit lückenhaften Daten\n* rollender Durchschnitt\n* einfache Prognosen\n\n### Umgang mit geographischen Daten\n\n* Speichern von Koordinaten in `pandas`\n* Zeichnen von Karten mit `Basemap`\n\n\n### Pandas in der Praxis\n\n* Mythen und Fakten\n* Numpy\n* Modellbildung in scikit-learn\n* alternative Programmpakete und Strategien zur Datenmodellierung\n* Umgang mit großen Datenmengen\n* Best Practices\n" }, { "alpha_fraction": 0.7210144996643066, "alphanum_fraction": 0.733742356300354, "avg_line_length": 25.843137741088867, "blob_id": "eee0800c3ff48a985d88f17e0937997996660", "content_id": "0bcabef204b1b656f07dc6f2ad48e39c16d10e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1950, "license_type": "no_license", "max_line_length": 226, "num_lines": 74, "path": "/datenvisualisierung/histogramm.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Histogramm\n\nZeichne ein Histogramm der Lebenserwartung für das Jahr 2015. Da uns `pandas` die meiste Arbeit abnimmt, werden wir die Gelegenheit dazu nutzen, das Diagramm möglichst hübsch zu gestalten.\n\n### Schritt 1\n\nDa die Aufbereitung der Daten sich nicht sonderlich von der letzten Übung unterscheidet, kannst Du die Daten und den Code wiederverwenden.\n\nSorge dafür, daß Du die Lebenserwartung für das Jahr 2015 in einem DataFrame `lifeexp` hast.\n\n### Schritt 2\n\nZeichne ein Histogramm mit den Standardeinstellungen.\n\n    import pylab as plt\n\n    plt.figure()\n    plt.hist(lifeexp['spaltenname'])\n    plt.savefig('histo.png')\n\n### Schritt 3\n\nProbiere unterschiedliche Werte für die Klassenanzahl aus. 
Wähle jeweils einen der folgenden Befehle und ersetze den vorher ausgeführten:\n\n    plt.hist(lifeexp['spaltenname'], 5)\n    plt.hist(lifeexp['spaltenname'], 10)\n    plt.hist(lifeexp['spaltenname'], 20)\n\nEntscheide Dich für einen aussagekräftigen Wert.\n\n\n### Schritt 4\n\nNun werden wir das Diagramm verschönern.\n\nBeschrifte das Diagramm. Verwende dazu vor dem Abspeichern die Funktionen:\n\n* plt.title('text')\n* plt.xlabel('text')\n* plt.ylabel('text')\n\n\n### Schritt 5\n\nStelle über die Funktion `plt.axis` den Bildausschnitt ein.\n\n    plt.axis([0.0, 0.0, 150.0, 1.0])\n\nFinde passende Zahlen und setze diese in den obigen Befehl ein.\n\n\n### Schritt 6\n\nProbiere nacheinander folgende optionale Parameter der Funktion `plt.hist` aus:\n\n    facecolor='green',\n    facecolor='#ff0000',\n    alpha=0.75,\n    histtype='bar',\n\nDie Parameter werden in den Aufruf von `plt.hist` am Ende angefügt.\n\n### Schritt 7\n\nLege die Auflösung beim Schreiben fest.\n\n    plt.savefig('histo.png', dpi=150)\n\nProbiere auch, das Diagramm als SVG-Grafik abzuspeichern, indem Du die Endung `.svg` angibst. Welche Vor- und Nachteile haben die Formate PNG und SVG?\n\n\n### Schritt 8\n\nSchreibe 3 Dinge auf, die Du noch am Histogramm verändern / verbessern möchtest. \n" }, { "alpha_fraction": 0.7202454209327698, "alphanum_fraction": 0.7243077158927917, "avg_line_length": 26.704917907714844, "blob_id": "45e33809fcc2e321f65affc068e2f517a90f4782", "content_id": "5e49a9a450318bbb87295aab9d2f183acda06327", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 160, "num_lines": 61, "path": "/datenvisualisierung/korrelation.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Eine Korrelation plotten\n\nIn dieser Aufgabe werden wir den Zusammenhang von Lebenserwartung und Fruchtbarkeit untersuchen. Dazu werden wir einen Scatterplot für das Jahr 2015 anfertigen.\n\n### Schritt 1\n\nLade die Datei `gapminder_total_fertility.csv` in pandas.\n\n    import pandas as pd\n\n    fertility = pd.read_csv('gapminder_total_fertility.csv', index_col=0)\n\n### Schritt 2\n\nVerfahre genauso mit der Datei `gapminder_lifeexpectancy.xlsx`. Speichere es in einem `DataFrame` mit dem Namen `lifeexp`.\n\n**Du benötigst dazu die Funktion `pd.read_excel`.**\n\n### Schritt 3\n\nPrüfe, ob beide Tabellen die gleiche Größe haben:\n\n    print(lifeexp.shape)\n\n**Wenn sie nicht die gleiche Größe haben, ist das nicht schlimm.**\n\n### Schritt 4\n\nVerbinde beide Tabellen mit der Funktion `merge`. Durch die Einstellung `left_index=True, right_index=True` werden Zeilen mit gleichem Index zusammengeführt.\n\n    df = lifeexp.merge(fertility, left_index=True, right_index=True)\n\n### Schritt 5\n\nZeige die Tabelle an. Finde heraus, wie die Spalten für das Jahr 2015 heißen:\n\n    print(df.columns)\n\nWähle beide Spalten für ein Jahr aus (**Achtung! 
Eventuell unterscheiden sie sich im Datentyp**) und schreibe sie in ein neues `DataFrame`.\n\n    df = df[['spalte1', 'spalte2']]\n\n### Schritt 6\n\nEliminiere Leerzeilen aus der Tabelle mit:\n\n    df = df.dropna()\n\n### Schritt 7\n\nPlotte die beiden Spalten gegeneinander:\n\n    import pylab as plt\n    df.plot.scatter('spalte1', 'spalte2', style='ro')\n    plt.savefig('korrelation.png')\n\n### Schritt 8\n\nBenenne die Spalten um, so daß das Diagramm sinnvoller beschriftet ist:\n\n    df = df.rename(columns={'spalte1': 'Lebenserwartung'})\n" }, { "alpha_fraction": 0.6203866600990295, "alphanum_fraction": 0.6783831119537354, "avg_line_length": 25.81818199157715, "blob_id": "e1d9f7542a221dbaa5f4ce9a5195c760b41585cb", "content_id": "5addccbdb707346de810397e092af8e090d23ffc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 63, "num_lines": 22, "path": "/datenvisualisierung/tortendiagramm.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nfrom pylab import figure, title, pie, savefig\n\nnucleotides = 'G', 'C', 'A', 'U'\ncount = [1024, 759, 606, 398]\nexplode = [0.0, 0.0, 0.05, 0.05]\n\ncolors = [\"yellowgreen\", \"limegreen\", \"orchid\", \"mediumorchid\"]\n\n\ndef get_percent(value):\n    '''Formats float values in pie slices to percent.'''\n    return \"%4.1f%%\" % (value)\n\n\nfigure()\ntitle('Nukleotide in der 23S RNA aus T.thermophilus')\n\npie(count, explode=explode, labels=nucleotides, shadow=True,\n    colors=colors, autopct=get_percent)\n\nsavefig('tortendiagramm.png', dpi=150)\nsavefig('tortendiagramm.svg', dpi=150)\n" }, { "alpha_fraction": 0.6520737409591675, "alphanum_fraction": 0.6854838728904724, "avg_line_length": 21.205127716064453, "blob_id": "8f30530a85fbbc8cdd22e310dd33788f4767efa5562410969b", "content_id": "3bb309ee420c35b6ca386550aaa0107d3c10563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 877, "license_type": "no_license", "max_line_length": 108, "num_lines": 39, "path": "/datenvisualisierung/balkendiagramm.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Ein Balkendiagramm plotten\n\n### Schritt 1\n\nLade die Datei `gapminder_total_fertility.csv` in pandas.\n\n    import pandas as pd\n\n    df = pd.read_csv('gapminder_total_fertility.csv', index_col=0)\n\n### Schritt 2\n\nWähle 3 Jahrgänge aus den Spalten des `DataFrame` aus, z.B.:\n\n    df = df[['1950', '1955', '2000']]\n\n### Schritt 3\n\nWähle 4 Länder aus dem Index des `DataFrame` aus, z.B.:\n\n    df = df.loc[['Germany', 'India', 'Bulgaria', 'Kenya']]\n\n### Schritt 4\n\nZeichne ein Balkendiagramm mit den Jahren als Balkengruppen:\n\n    import pylab as plt\n    df.plot.bar()\n    plt.savefig('balken.png')\n\n### Schritt 5\n\nÄndere die Größe des Diagramms, indem Du zum Aufruf von `plot.bar` den Parameter `figsize=(5,7)` hinzufügst.\n\n### Schritt 6\n\nZeichne ein weiteres Balkendiagramm. 
Wähle diesmal nur eine Spalte als Balkengruppe aus:\n\n    df.plot.bar(y='1950', width=0.7, color='orange')\n\n" }, { "alpha_fraction": 0.5059076547622681, "alphanum_fraction": 0.5778732299804688, "avg_line_length": 20.627906799316406, "blob_id": "5865ea8ae70bf6e42b301546ea8288fbb2bbf168", "content_id": "abcb715fdb8694fce72ae13c3096f0e004f440e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 43, "num_lines": 43, "path": "/datenvisualisierung/multipanel.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pylab as plt\nimport numpy as np\nimport pandas as pd\n\nxvalues = np.arange(0.0, 20.0, 0.1)\n\ndf = pd.DataFrame({\n    'x': xvalues,\n    'sin': np.sin(xvalues),\n    'cos': np.cos(xvalues),\n    'tan': np.tan(xvalues),\n    'log': np.log(xvalues),\n})\n\nfig = plt.figure()\n\nax1 = fig.add_subplot(2, 2, 1)\nax1.plot(df['x'], df['sin'], 'k--')\nplt.xlabel('x')\nplt.ylabel('$sin(x)$')\nplt.axis([0.0, 20.0, -1.2, 1.2])\n\nax2 = fig.add_subplot(2, 2, 2)\nax2.plot(df['x'], df['cos'], 'r^')\nplt.xlabel('x')\nplt.ylabel('$cos(x)$')\nplt.axis([0.0, 20.0, -1.2, 1.2])\n\nax3 = fig.add_subplot(2, 2, 3)\nax3.plot(df['x'], df['tan'], 'g-')\nplt.xlabel('x')\nplt.ylabel('$tan(x)$')\nplt.axis([0.0, 20.0, 0.0, 30.0])\n\nax4 = fig.add_subplot(2, 2, 4)\nax4.plot(df['x'], df['log'], 'bo')\nplt.xlabel('x')\nplt.ylabel('$log(x)$')\nplt.axis([0.0, 20.0, -5, 5.0])\n\nplt.subplots_adjust(wspace=0.4, hspace=0.4)\nplt.savefig('multipanel.png')\nplt.savefig('multipanel.svg')\n" }, { "alpha_fraction": 0.6678608059883118, "alphanum_fraction": 0.7041965126991272, "avg_line_length": 28.590909957885742, "blob_id": "b633afe1f59b48c514d3af60d0126df1bebe0eeb", "content_id": "ca1fb64a65bcbc40d7eb074ecc6799b4cdacc588", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1973, "license_type": "no_license", "max_line_length": 191, "num_lines": 66, "path": "/datenaggregation/README.md", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\n# Datenaggregation\n\n![Gruppenoperationen in pandas](aggregation.png)\n\n### Aufgabe 1\n\nFühre die Beispiele in `beispiele_gruppen.py` aus, so daß Du die *Gruppenobjekte* `g1` bis `g6` erhältst.\n\n### Aufgabe 2\n\nWende auf jedes Gruppenobjekt eine oder mehrere \nder Aggregatfunktionen aus `beispiele_aggregation.py` an.\n\n\n### Aufgabe 3\n\nErkläre eine zufällig bestimmte Kombination aus Gruppe und Aggregatfunktion.\n\n\n### Aufgabe 4\n\nLies den Datensatz von Babynamen für das Jahr 2014 in `pandas` ein. Beantworte folgende Fragen mit Hilfe von Aggregation:\n\n* Wie viele unterschiedliche Namen von Jungen / Mädchen gibt es?\n* Wie viele Jungen / Mädchen gibt es insgesamt?\n* Wie sind Mittelwert/Standardabweichung der Anzahl von Jungen und Mädchen? \n\n### Aufgabe 5\n\nFüge eine zusätzliche Spalte mit dem Anfangs- bzw. Endbuchstaben hinzu. Verwende dazu die Methode `df.apply(funktion)` wie in der `pandas`-Grundlagenübung gezeigt. Beantworte folgende Fragen.\n\n* Welches sind die häufigsten 5 Anfangsbuchstaben?\n* Welches sind die häufigsten 5 Endbuchstaben?\n\n### Aufgabe 6\n\nLies alle 135 Jahrgänge in ein DataFrame ein. 
Folgende Codeschnipsel könnten dabei nützlich sein:\n\n df['jahr'] = 2015\n\n df = pd.concat([df1, df2, df3, ...])\n\n### Aufgabe 7\n\nBeantworte folgende Fragen:\n\n* Wie viele unterschiedliche Namen gab es pro Jahr?\n* Wie viele Babys gab es pro Jahr?\n* Wie viele Babys gab es pro Jahr nach M/F gegliedert?\n* Wie häufig waren alle Anfangsbuchstaben in einem Jahr?\n* Wie häufig war der Name Maria in jedem Jahrzehnt?\n\n### Aufgabe 8\n\nGegeben ist ein `DataFrame` mit folgender Struktur:\n\n name gender count year first last\n 0 Mary F 7065 1880 M y\n 1 Anna F 2604 1880 A a\n 2 Emma F 2003 1880 E a\n 3 Elizabeth F 1939 1880 E h\n 4 Minnie F 1746 1880 M e\n\nWas tut der folgende Befehl?\n\n pd.crosstab(names['first'], names['last'])\n" }, { "alpha_fraction": 0.582317054271698, "alphanum_fraction": 0.6310975551605225, "avg_line_length": 15.350000381469727, "blob_id": "7c3aec5605d8701f037a21e9a0fbc8898cdcf154", "content_id": "454536e805e948d952a20aa11b968dffa2f80676", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 328, "license_type": "no_license", "max_line_length": 35, "num_lines": 20, "path": "/datenvisualisierung/sinusfunktion.py", "repo_name": "veit-schiele-communications/datenanalyse-in-python", "src_encoding": "UTF-8", "text": "\nimport pylab as plt\nimport numpy as np\nimport pandas as pd\n\nxvalues = np.arange(0.0, 20.0, 0.1)\n\ndf = pd.DataFrame({\n 'x': xvalues,\n 'sin': np.sin(xvalues),\n})\n\nplt.figure()\n\ndf.plot('x', 'sin')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.axis([0.0, 20.0, -1.2, 1.2])\n\nplt.title('Sinusfunktion')\nplt.savefig('sinusfunktion.png')\n" } ]
44
VladimirBalun/RacingWorld
https://github.com/VladimirBalun/RacingWorld
e67cf33c2eb2d7833528313164293cb50a8d91ab
c7e600c5899e3ea78f50bd2f8cd915437bad7789
3b966ac36c2310dd33788f4767efa5562410969b
refs/heads/develop
2022-04-05T11:25:38.347743
2019-12-26T18:36:19
2019-12-26T19:07:51
156,984,042
81
35
Apache-2.0
2018-11-10T13:31:23
2019-09-23T17:40:54
2019-09-23T17:40:52
C++
[ { "alpha_fraction": 0.664383590221405, "alphanum_fraction": 0.6735159754753113, "avg_line_length": 22.89090919494629, "blob_id": "65b7ac0cb8457b4c78b14932cd0d6061bee369f7", "content_id": "7acd5e77ecf5cd91c6600f25475bcb414611e3b5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1314, "license_type": "permissive", "max_line_length": 80, "num_lines": 55, "path": "/Sources/Core/Graphics/Renderer.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <stack>\n#include <glm/mat4x4.hpp>\n\n#include \"Camera.hpp\"\n#include \"FrameCalculator.hpp\"\n#include \"../Helpers/Macroses.hpp\"\n\nnamespace Core::Graphics \n{\n\n namespace SceneGraph\n {\n\n class Node;\n class Scene;\n\n DECL_SMART_PTRS(Node)\n\n }\n\n class Shader;\n\n class Renderer\n {\n public:\n void draw(const SceneGraph::Scene& scene, const std::string& shader_id);\n private:\n void drawNode(SceneGraph::NodeSPtr node);\n void updateCamera() noexcept;\n private:\n Camera m_camera;\n FrameCalculator m_frame_calculator;\n const Shader* m_basic_shader = nullptr;\n std::stack<glm::mat4x4> m_transformations_stack;\n };\n\n}\n" }, { "alpha_fraction": 0.6932989954948425, "alphanum_fraction": 0.7001718282699585, "avg_line_length": 34.54198455810547, "blob_id": "11545c4f2e2bb5fe574b4368bf7b8c95aab663c1", "content_id": "1e809dcc6d373d8089a167500f0fd520050673dd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4656, "license_type": "permissive", "max_line_length": 141, "num_lines": 131, "path": "/Sources/Core/Graphics/Shader.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Shader.hpp\"\n\n#include <glew.h>\n#include <glm/glm.hpp>\n#include <glm/gtc/type_ptr.hpp>\n\n#include \"../Helpers/Debug.hpp\"\n#include \"../Helpers/Macroses.hpp\"\n#include \"../Resources/Text.hpp\"\n\nCore::Graphics::Shader::Shader(const Resources::VertexShaderSPtr vertex_shader, const Resources::FragmentShaderSPtr fragment_shader) noexcept\n{\n const std::string_view vertex_shader_data = vertex_shader->getData();\n const std::string_view fragment_shader_data = fragment_shader->getData();\n const 
unsigned int vertex_shader_id = compileShader(vertex_shader_data.data(), GL_VERTEX_SHADER);\n    const unsigned int fragment_shader_id = compileShader(fragment_shader_data.data(), GL_FRAGMENT_SHADER);\n    linkShaders(vertex_shader_id, fragment_shader_id);\n}\n\nvoid Core::Graphics::Shader::use() const noexcept\n{\n    if (isValid())\n    {\n        glUseProgram(m_program_id);\n    }\n    else\n    {\n        LOG_WARNING(\"Shader program is invalid, could not use it.\");\n    }\n}\n\nbool Core::Graphics::Shader::isValid() const noexcept\n{\n    return m_program_id != 0;\n}\n\nvoid Core::Graphics::Shader::setUniformi(const char* name, int value) const noexcept\n{\n    // glGetUniformLocation returns a signed GLint (-1 on failure), so the result must not be stored unsigned\n    const int location_id = glGetUniformLocation(m_program_id, name);\n    LOG_WARNING_IF(location_id < 0, STR(\"Invalid uniform location for '\") + STR(name) + STR(\"'.\"));\n    glUniform1i(location_id, value);\n}\n\nvoid Core::Graphics::Shader::setUniformf(const char* name, const float value) const noexcept\n{\n    const int location_id = glGetUniformLocation(m_program_id, name);\n    LOG_WARNING_IF(location_id < 0, STR(\"Invalid uniform location for '\") + STR(name) + STR(\"'.\"));\n    glUniform1f(location_id, value);\n}\n\nvoid Core::Graphics::Shader::setUniformVector3f(const char* name, const glm::vec3& vector) const noexcept\n{\n    const int location_id = glGetUniformLocation(m_program_id, name);\n    LOG_WARNING_IF(location_id < 0, STR(\"Invalid uniform location for '\") + STR(name) + STR(\"'.\"));\n    glUniform3f(location_id, vector.x, vector.y, vector.z);\n}\n\nvoid Core::Graphics::Shader::setUniformMatrix4x4f(const char* name, const glm::mat4& matrix) const noexcept\n{\n    const int location_id = glGetUniformLocation(m_program_id, name);\n    LOG_WARNING_IF(location_id < 0, STR(\"Invalid uniform location for '\") + STR(name) + STR(\"'.\"));\n    glUniformMatrix4fv(location_id, 1, GL_FALSE, value_ptr(matrix));\n}\n\nunsigned int Core::Graphics::Shader::compileShader(const std::string& shader_data, const int shader_type) noexcept\n{\n    const unsigned int shader = glCreateShader(shader_type);\n    const char* buffer = shader_data.c_str();\n\n    glShaderSource(shader, 1, &buffer, NULL);\n    glCompileShader(shader);\n\n#ifdef _DEBUG\n    int was_compiled_shader = 0;\n    glGetShaderiv(shader, GL_COMPILE_STATUS, &was_compiled_shader);\n    if (!was_compiled_shader)\n    {\n        std::array<char, 512> error_log{};\n        // Shader objects need glGetShaderInfoLog; glGetProgramInfoLog only works for program objects\n        glGetShaderInfoLog(shader, error_log.max_size(), NULL, error_log.data());\n        LOG_WARNING(\"Shader was not compiled. Cause:\" + STR(error_log.data()));\n    }\n#endif // _DEBUG\n\n    return shader;\n}\n\nvoid Core::Graphics::Shader::linkShaders(const unsigned int vertex_shader, const unsigned int fragment_shader) noexcept\n{\n    m_program_id = glCreateProgram();\n    glAttachShader(m_program_id, vertex_shader);\n    glAttachShader(m_program_id, fragment_shader);\n    glLinkProgram(m_program_id);\n\n#ifdef _DEBUG\n    int were_linked_shaders = 0;\n    glGetProgramiv(m_program_id, GL_LINK_STATUS, &were_linked_shaders);\n    if (!were_linked_shaders)\n    {\n        std::array<char, 512> error_log{};\n        glGetProgramInfoLog(m_program_id, error_log.max_size(), NULL, error_log.data());\n        LOG_WARNING(\"Shaders were not linked. 
Cause:\" + STR(error_log.data()));\n }\n#endif // _DEBUG\n\n glDetachShader(m_program_id, vertex_shader);\n glDetachShader(m_program_id, fragment_shader);\n glDeleteShader(vertex_shader);\n glDeleteShader(fragment_shader);\n}\n\nCore::Graphics::Shader::~Shader()\n{\n glDeleteProgram(m_program_id);\n}\n" }, { "alpha_fraction": 0.7114093899726868, "alphanum_fraction": 0.718120813369751, "avg_line_length": 32.11111068725586, "blob_id": "979a64b910bc25199d53f951686046cf51775dcf", "content_id": "40412aecc44aaef95f77bc595966056d9ff860a7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1192, "license_type": "permissive", "max_line_length": 127, "num_lines": 36, "path": "/Sources/Core/Resources/Loaders/SoundLoader.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"SoundLoader.hpp\"\n\n#include <audiere.h>\n\n#include \"../Sound.hpp\"\n#include \"../../Managers/SoundManager.hpp\"\n\nbool Core::Resources::Loaders::SoundLoader::load(Sound& sound, std::string_view sound_file_path, const bool streaming) noexcept\n{\n const audiere::AudioDevicePtr& audio_device = g_sound_manager.getAudioDevice();\n audiere::OutputStreamPtr audio_stream = OpenSound(audio_device, sound_file_path.data(), streaming);\n if (audio_stream)\n {\n sound.setAudioStream(std::move(audio_stream));\n return true;\n }\n\n return false;\n}\n" }, { "alpha_fraction": 0.6417695879936218, "alphanum_fraction": 0.6452168822288513, "avg_line_length": 34.8865966796875, "blob_id": "983ebd8a50a8ca73ba25b5710f6fd72dd1ddd624", "content_id": "21577c2f2476416fd64e472e4125b21429468713", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3481, "license_type": "permissive", "max_line_length": 135, "num_lines": 97, "path": "/Sources/Core/Managers/LocaleManager.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"LocaleManager.hpp\"\n\n#include \"ConfigurationManager.hpp\"\n#include \"../Helpers/Time.hpp\"\n#include \"../Helpers/Debug.hpp\"\n#include \"../Helpers/Macroses.hpp\"\n\nvoid Core::Managers::LocaleManager::initialize()\n{\n#ifdef _DEBUG\n const auto 
start_time = Helpers::getCurrentTimeInMilliseconds<double>();\n#endif // _DEBUG\n\n if (libxl::Book* book = xlCreateBook())\n {\n const std::string resources_path = STR(g_configuration_manager.getResourcesPath());\n const std::string locales_config_filename = STR(g_configuration_manager.getLocalesConfigurationFilename());\n const std::string locales_config_file_full_path = resources_path + locales_config_filename;\n if (book->load(locales_config_file_full_path.c_str()))\n {\n if (libxl::Sheet* sheet = book->getSheet(0))\n {\n int key_index_cow = 0;\n int data_index_cow = 0;\n findNecessaryColIndexesInSheet(sheet, key_index_cow, data_index_cow);\n readAllStringFromSheet(sheet, key_index_cow, data_index_cow);\n }\n }\n else\n {\n LOG_ERROR(\"'LocaleManager' was not initialized. Cause: file '\" + locales_config_file_full_path + \"' was not loaded.\");\n }\n book->release();\n }\n else\n {\n LOG_ERROR(\"'LocaleManager' was not initialized. Cause: LibXL internal error.\");\n }\n\n#ifdef _DEBUG\n const auto end_time = Helpers::getCurrentTimeInMilliseconds<double>();\n const auto loading_time = end_time - start_time;\n LOG_PROFILING(\"'LocaleManager' was initialized in \" + TO_STR(loading_time) + \"ms.\");\n#endif // _DEBUG\n}\n\nvoid Core::Managers::LocaleManager::findNecessaryColIndexesInSheet(libxl::Sheet* sheet, int& key_index, int& data_index) const noexcept\n{\n const int header_row = 0;\n const std::string key_header_name = \"Key\";\n const std::string data_header_name = g_configuration_manager.getCurrentLanguage();\n for (int col = sheet->firstCol(); col < sheet->lastCol(); ++col)\n {\n const std::string data = sheet->readStr(header_row, col);\n if (data == key_header_name)\n {\n key_index = col;\n }\n if (data == data_header_name)\n {\n data_index = col;\n }\n }\n}\n\nvoid Core::Managers::LocaleManager::readAllStringFromSheet(libxl::Sheet* sheet, const int key_index, const int data_index) noexcept\n{\n for (int row = sheet->firstRow(); row < sheet->lastRow(); ++row)\n {\n const std::string key = sheet->readStr(row, key_index);\n const std::string data = sheet->readStr(row, data_index);\n m_strings.emplace(key, data);\n }\n}\n\nstd::string Core::Managers::LocaleManager::getString(const std::string& key) const noexcept\n{\n const auto& it = m_strings.find(key);\n return (it != end(m_strings)) ? 
(it->second) : (\"\");\n}\n" }, { "alpha_fraction": 0.6883488297462463, "alphanum_fraction": 0.6947819590568542, "avg_line_length": 32.28571319580078, "blob_id": "9af742dff99301fddac5701f9a0019e9b0da600c", "content_id": "3c07d88860aa4249ac0c332b7b5697f2b4a57799", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1399, "license_type": "permissive", "max_line_length": 105, "num_lines": 42, "path": "/Sources/Core/Resources/Loaders/TextLoader.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"TextLoader.hpp\"\n\n#include <boost/filesystem.hpp>\n#include <boost/iostreams/device/mapped_file.hpp>\n\n#include \"../Text.hpp\"\n#include \"../../Helpers/Debug.hpp\"\n\nbool Core::Resources::Loaders::TextLoader::load(Text& text, std::string_view text_file_path) noexcept\n{\n boost::filesystem::ifstream input_stream(text_file_path.data());\n if (!input_stream.is_open())\n {\n LOG_WARNING(\"Text file '\" + STR(text_file_path) + \"' was not opened for reading.\");\n return false;\n }\n\n const auto file_size = static_cast<std::size_t>(boost::filesystem::file_size(text_file_path.data()));\n std::string buffer{};\n buffer.resize(file_size + 1);\n input_stream.read(buffer.data(), file_size);\n text.setData(std::move(buffer));\n\n return true;\n}\n\n" }, { "alpha_fraction": 0.750423014163971, "alphanum_fraction": 0.7614213228225708, "avg_line_length": 32.771427154541016, "blob_id": "4d1346ea391eb45160488a2c8039c480a7bec889", "content_id": "6088d5b9ab28fc94d6b65ce31bf7219283cc533b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1182, "license_type": "permissive", "max_line_length": 74, "num_lines": 35, "path": "/CMakeLists.txt", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#\n# Copyright 2018 Vladimir Balun\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ncmake_minimum_required (VERSION 3.10)\n\nproject (RacingWorld)\n\nset (CMAKE_CXX_STANDARD 17)\n\nset_property (GLOBAL PROPERTY USE_FOLDERS ON)\n\nset (PROJECT_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})\nset (PROJECT_BUILD_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Build)\nset (PROJECT_BIN_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Bin)\nset (PROJECT_SOURCES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Sources)\nset (PROJECT_TESTS_DIR 
${CMAKE_CURRENT_SOURCE_DIR}/Tests)\nset (PROJECT_BENCHMARKS_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Benchmarks)\nset (PROJECT_DEPENDENCIES_DIR ${CMAKE_CURRENT_SOURCE_DIR}/Dependencies)\n\nenable_testing()\n\nadd_subdirectory (Sources)\n" }, { "alpha_fraction": 0.6838983297348022, "alphanum_fraction": 0.6906779408454895, "avg_line_length": 26.44186019897461, "blob_id": "a140a1d8ec3bf1078749ba8637d5a0f7bfffc240", "content_id": "ace1f2ebc0184834017bbd204aac989f0b774dcf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1180, "license_type": "permissive", "max_line_length": 83, "num_lines": 43, "path": "/Sources/Core/Resources/Text.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n\n#include \"IResource.hpp\"\n#include \"../Helpers/Holders/Polymorphic.hpp\"\n\nnamespace Core::Resources\n{\n\n class Text;\n using VertexShader = Text;\n using FragmentShader = Text;\n\n class Text final : public IResource, public Helpers::Holders::Polymorphic<Text>\n {\n public:\n Text() noexcept = default;\n void setData(std::string&& data) noexcept;\n std::string_view getData() const noexcept;\n public:\n bool load(std::string_view text_path) noexcept override;\n private:\n std::string m_text_data{};\n };\n\n}\n" }, { "alpha_fraction": 0.6176092624664307, "alphanum_fraction": 0.6233932971954346, "avg_line_length": 26.298246383666992, "blob_id": "c278fd7de77e72d360bb3280ef251b9f23d2698e", "content_id": "b189c85a4b6d0e1a55d904606387fe5cce7ea7c8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1556, "license_type": "permissive", "max_line_length": 75, "num_lines": 57, "path": "/Sources/Core/Resources/ResourceTypes.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include \"../ResourcesFWD.hpp\"\n\nnamespace Core::Resources \n{\n\n enum class ResourceType\n {\n UNKNOWN = 0,\n MATERIAL_TYPE,\n SOUND_TYPE,\n MODEL_TYPE,\n IMAGE_TYPE,\n TEXT_TYPE,\n MAP_TYPE,\n\n // Must be last\n COUNT_TYPES\n };\n\n template<typename T>\n constexpr ResourceType getResourceType() noexcept\n {\n if (std::is_same<T, Material>::value)\n return ResourceType::MATERIAL_TYPE;\n if 
(std::is_same<T, Model>::value)\n return ResourceType::MODEL_TYPE;\n if (std::is_same<T, Sound>::value)\n return ResourceType::SOUND_TYPE;\n if (std::is_same<T, Image>::value)\n return ResourceType::IMAGE_TYPE;\n if (std::is_same<T, Text>::value)\n return ResourceType::TEXT_TYPE;\n if (std::is_same<T, Map>::value)\n return ResourceType::MAP_TYPE;\n\n return ResourceType::UNKNOWN;\n }\n\n}\n" }, { "alpha_fraction": 0.6049594879150391, "alphanum_fraction": 0.6172354817390442, "avg_line_length": 42.32978820800781, "blob_id": "55dfa23b702def980f6c05cab7f09b4838263481", "content_id": "4f92bd15012d73988bb502876b03528e81429a0b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4073, "license_type": "permissive", "max_line_length": 100, "num_lines": 94, "path": "/Dependencies/LibXL/Include/IFormatT.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_IFORMATT_H\n#define LIBXL_IFORMATT_H\n\n#include \"setup.h\"\n#include \"enum.h\"\n\nnamespace libxl {\n\n template<class TCHAR> struct IFontT;\n\n template<class TCHAR>\n struct IFormatT\n {\n virtual IFontT<TCHAR>* XLAPIENTRY font() const = 0;\n virtual bool XLAPIENTRY setFont(IFontT<TCHAR>* font) = 0;\n\n virtual int XLAPIENTRY numFormat() const = 0;\n virtual void XLAPIENTRY setNumFormat(int numFormat) = 0;\n\n virtual AlignH XLAPIENTRY alignH() const = 0;\n virtual void XLAPIENTRY setAlignH(AlignH align) = 0;\n\n virtual AlignV XLAPIENTRY alignV() const = 0;\n virtual void XLAPIENTRY setAlignV(AlignV align) = 0;\n\n virtual bool XLAPIENTRY wrap() const = 0;\n virtual void XLAPIENTRY setWrap(bool wrap = true) = 0;\n\n virtual int XLAPIENTRY rotation() const = 0;\n virtual bool XLAPIENTRY setRotation(int rotation) = 0;\n\n virtual int XLAPIENTRY indent() const = 0;\n virtual void XLAPIENTRY setIndent(int indent) = 0;\n\n virtual bool XLAPIENTRY shrinkToFit() const = 0;\n virtual void XLAPIENTRY setShrinkToFit(bool shrinkToFit = true) = 0;\n\n virtual void XLAPIENTRY setBorder(BorderStyle style = BORDERSTYLE_THIN) = 0;\n virtual void XLAPIENTRY setBorderColor(Color color) = 0;\n\n virtual BorderStyle XLAPIENTRY borderLeft() const = 0;\n virtual void XLAPIENTRY setBorderLeft(BorderStyle style = BORDERSTYLE_THIN) = 0;\n\n virtual BorderStyle XLAPIENTRY borderRight() const = 0;\n virtual void XLAPIENTRY setBorderRight(BorderStyle style = BORDERSTYLE_THIN) = 0;\n\n virtual BorderStyle XLAPIENTRY borderTop() const = 0;\n virtual void XLAPIENTRY setBorderTop(BorderStyle style = BORDERSTYLE_THIN) = 0;\n\n virtual BorderStyle XLAPIENTRY borderBottom() const = 0;\n virtual void XLAPIENTRY setBorderBottom(BorderStyle style = BORDERSTYLE_THIN) = 0;\n\n virtual Color XLAPIENTRY borderLeftColor() const = 0;\n virtual void XLAPIENTRY setBorderLeftColor(Color color) = 0;\n\n virtual Color XLAPIENTRY borderRightColor() const = 0;\n virtual void XLAPIENTRY setBorderRightColor(Color color) = 0;\n\n virtual Color XLAPIENTRY borderTopColor() const = 0;\n virtual void XLAPIENTRY setBorderTopColor(Color color) = 0;\n\n virtual Color XLAPIENTRY borderBottomColor() const = 0;\n virtual void XLAPIENTRY setBorderBottomColor(Color color) = 0;\n\n virtual BorderDiagonal XLAPIENTRY borderDiagonal() const = 0;\n virtual void XLAPIENTRY setBorderDiagonal(BorderDiagonal border) = 0;\n\n virtual BorderStyle XLAPIENTRY borderDiagonalStyle() const = 0;\n virtual void XLAPIENTRY setBorderDiagonalStyle(BorderStyle style) = 0;\n\n virtual Color XLAPIENTRY borderDiagonalColor() 
const = 0;\n virtual void XLAPIENTRY setBorderDiagonalColor(Color color) = 0;\n\n virtual FillPattern XLAPIENTRY fillPattern() const = 0;\n virtual void XLAPIENTRY setFillPattern(FillPattern pattern) = 0;\n\n virtual Color XLAPIENTRY patternForegroundColor() const = 0;\n virtual void XLAPIENTRY setPatternForegroundColor(Color color) = 0;\n\n virtual Color XLAPIENTRY patternBackgroundColor() const = 0;\n virtual void XLAPIENTRY setPatternBackgroundColor(Color color) = 0;\n\n virtual bool XLAPIENTRY locked() const = 0;\n virtual void XLAPIENTRY setLocked(bool locked = true) = 0;\n\n virtual bool XLAPIENTRY hidden() const = 0;\n virtual void XLAPIENTRY setHidden(bool hidden = true) = 0;\n\n virtual ~IFormatT() {}\n };\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6564459800720215, "alphanum_fraction": 0.6662020683288574, "avg_line_length": 25.090909957885742, "blob_id": "02d6450cd96a03f64dcfcec77aabe524e5826338", "content_id": "ba1a2ed36bab2fb258198095e0b23d9af6c5412a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1435, "license_type": "permissive", "max_line_length": 101, "num_lines": 55, "path": "/Sources/Core/Graphics/SceneGraph/Mesh.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <vector>\n\n#include \"Texture2D.hpp\"\n#include \"../../Resources/Model.hpp\"\n#include \"../../Helpers/Macroses.hpp\"\n\nnamespace Core::Resources\n{\n\n FWD_DECL_SMART_PTRS_FOR_CLASS(Image);\n\n}\n\nnamespace Core::Graphics::SceneGraph \n{\n\n class Mesh\n {\n public:\n Mesh() noexcept = default;\n Mesh(Texture2D texture, std::vector<float>&& elements, unsigned int count_elements) noexcept;\n void draw() const noexcept;\n void free() noexcept;\n private:\n void generateIdentifiers();\n void bindDataIdentifiers();\n void unbindDataIdentifiers();\n void fillBuffersData();\n private:\n std::vector<float> m_elements{};\n Texture2D m_texture;\n unsigned int m_vbo = 0u;\n unsigned int m_vao = 0u;\n unsigned int m_count_elements = 0u;\n };\n\n}\n" }, { "alpha_fraction": 0.6305732727050781, "alphanum_fraction": 0.6352055668830872, "avg_line_length": 29.839284896850586, "blob_id": "35eacecaf3e8efb000d15164ff8d8b453bebe336", "content_id": "0fe3e7611f3b4d8e57af2985e1bfe8c705f10441", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1727, "license_type": "permissive", "max_line_length": 85, "num_lines": 56, "path": "/Sources/Core/Graphics/SceneGraph/Scene.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless 
required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <unordered_map>\n\n#include \"Light.hpp\"\n\nnamespace Core::Graphics\n{\n\n class Shader;\n\n namespace SceneGraph\n {\n\n class Mesh;\n class Node;\n\n class Scene\n {\n public:\n void addMesh(const std::string& shader_id, Mesh&& mesh);\n void addShader(const std::string& shader_id, Shader&& shader);\n void setLight(Light&& scene_light) noexcept;\n void setRootNode(std::shared_ptr<Node> root_node) noexcept;\n std::shared_ptr<Node> getRootNode() const noexcept;\n bool isExistsMesh(const std::string& mesh_id) const noexcept;\n const Light& getLight() const noexcept;\n const Mesh* getMeshByID(const std::string& mesh_id) const noexcept;\n const Shader* getShaderByID(const std::string& shader_id) const noexcept;\n ~Scene();\n private:\n std::unordered_map<std::string, Mesh> m_meshes{};\n std::unordered_map<std::string, Shader> m_shaders{};\n Light m_scene_light{};\n std::shared_ptr<Node> m_root_node = nullptr;\n };\n\n }\n\n}\n" }, { "alpha_fraction": 0.7144615650177002, "alphanum_fraction": 0.7243077158927917, "avg_line_length": 27.017240524291992, "blob_id": "4b174620bb2466bdf62629f05f19c967952e1a5f", "content_id": "c0dc608e942fb7c61018a3903958193d5fb87b20", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1625, "license_type": "permissive", "max_line_length": 85, "num_lines": 58, "path": "/Sources/Core/Graphics/SceneGraph/Light.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Light.hpp\"\n\nvoid Core::Graphics::SceneGraph::Light::setPosition(glm::vec3&& position) noexcept\n{\n m_position = std::move(position);\n}\n\nvoid Core::Graphics::SceneGraph::Light::setAmbientColor(glm::vec3&& color) noexcept\n{\n m_ambient_color = std::move(color);\n}\n\nvoid Core::Graphics::SceneGraph::Light::setDiffuseColor(glm::vec3&& color) noexcept\n{\n m_diffuse_color = std::move(color);\n}\n\nvoid Core::Graphics::SceneGraph::Light::setSpecularColor(glm::vec3&& color) noexcept\n{\n m_specular_color = std::move(color);\n}\n\nconst glm::vec3& Core::Graphics::SceneGraph::Light::getPosition() const noexcept\n{\n return m_position;\n}\n\nconst glm::vec3& Core::Graphics::SceneGraph::Light::getAmbientColor() const noexcept\n{\n return m_ambient_color;\n}\n\nconst glm::vec3& Core::Graphics::SceneGraph::Light::getDiffuseColor() const noexcept\n{\n return m_diffuse_color;\n}\n\nconst glm::vec3& Core::Graphics::SceneGraph::Light::getSpecularColor() const noexcept\n{\n return m_specular_color;\n}\n" }, { "alpha_fraction": 
0.6654465794563293, "alphanum_fraction": 0.681551992893219, "avg_line_length": 30.045454025268555, "blob_id": "96af89afab19c61621b8f8a698087c411e0a8a5d", "content_id": "a8eea7ee7548aadc8288572e2a91c86a3f9e2041", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1366, "license_type": "permissive", "max_line_length": 85, "num_lines": 44, "path": "/Sources/Core/Resources/Image.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <cstdint>\n\n#include \"IResource.hpp\"\n#include \"../Helpers/Holders/Polymorphic.hpp\"\n\nnamespace Core::Resources \n{\n\n class Image final : public IResource, public Helpers::Holders::Polymorphic<Image>\n {\n public:\n void setWidth(std::uint16_t width) noexcept;\n void setHeight(std::uint16_t height) noexcept;\n void setData(unsigned char* data) noexcept;\n std::uint16_t getWidth() const noexcept;\n std::uint16_t getHeight() const noexcept;\n const unsigned char* getData() const noexcept;\n bool load(std::string_view image_path) noexcept override;\n ~Image();\n private:\n unsigned char* m_data = nullptr;\n std::uint16_t m_width = 0u;\n std::uint16_t m_height = 0u;\n };\n\n}\n" }, { "alpha_fraction": 0.5169132947921753, "alphanum_fraction": 0.5179703831672668, "avg_line_length": 42.04545593261719, "blob_id": "211acecdd96229f67ba8c16baf23806d617998ef", "content_id": "5b519a8f5c242b1772690fd98cac0b1340f65b86", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 946, "license_type": "permissive", "max_line_length": 85, "num_lines": 22, "path": "/Sources/PrecompiledHeader.cmake", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#-------------------------------------------------------------------\n# This file is part of the CMake build system for OGRE\n# (Object-oriented Graphics Rendering Engine)\n# For the latest info, see http://www.ogre3d.org/\n#\n# The contents of this file are placed in the public domain. 
Feel\n# free to make use of it in any way you like.\n#-------------------------------------------------------------------\n \n##################################################################\n# Support macro to use a precompiled header\n# Usage:\n# use_precompiled_header(TARGET HEADER_FILE SRC_FILE)\n##################################################################\n \nmacro (add_precompiled_header TARGET HEADER_FILE SRC_FILE)\n get_filename_component (HEADER ${HEADER_FILE} NAME)\n if (MSVC)\n add_definitions (/Yu\"${HEADER}\")\n set_source_files_properties (${SRC_FILE} PROPERTIES COMPILE_FLAGS /Yc\"${HEADER}\")\n endif ()\nendmacro (add_precompiled_header)" }, { "alpha_fraction": 0.6924101114273071, "alphanum_fraction": 0.7163781523704529, "avg_line_length": 25.03333282470703, "blob_id": "ea3e010be66dac6382a9c8904932e0a0357d5819", "content_id": "c7871549bff37420167b8e6990404bae5d714fc5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 751, "license_type": "permissive", "max_line_length": 105, "num_lines": 30, "path": "/Documentation/INSTALLATION.md", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "# Installation guide\n\n## Building RacingWorld\n\n### What you need for building:\n\nFirst of all, you need to install the necessary programs on your computer:\n\n* <a href=\"https://cmake.org/download/\">CMake</a>\n* <a href=\"https://visualstudio.microsoft.com/en/?rr=https%3A%2F%2Fyandex.ru%2F\">Visual Studio 2017</a>\n\nAfter that, clone a copy of the main RacingWorld git repository by running:\n\n git clone https://github.com/<your-username>/RacingWorld.git\n\nThen run the build script:\n\n cd Scripts\n Build.bat\n\nIf you want to build the project manually, use the following:\n\n mkdir Build\n cd Build\n cmake -G \"Visual Studio 15 2017\" ..\n :: Launch the generated project in Visual Studio 2017\n\n---\n\nIf you have any questions, please contact: [email protected]\n" }, { "alpha_fraction": 0.643966555595398, "alphanum_fraction": 0.6696535348892212, "avg_line_length": 37.20000076293945, "blob_id": "12b325ef0fc1dbba5e57522d98b853ddfd58b01d", "content_id": "3a3267e741476452a33c575bdc1052161ebf3ca0", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1674, "license_type": "permissive", "max_line_length": 128, "num_lines": 45, "path": "/Sources/Core/Graphics/SceneGraph/Light.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <glm/vec3.hpp>\n\nnamespace Core::Graphics::SceneGraph \n{\n\n    class Light\n    {\n    public:\n        Light() noexcept = default;\n        Light(const glm::vec3& position, const glm::vec3& ambient, const glm::vec3& diffuse, const glm::vec3& specular) noexcept\n            : m_position(position), m_ambient_color(ambient), m_diffuse_color(diffuse), m_specular_color(specular) {}\n        void 
setPosition(glm::vec3&& position) noexcept;\n void setAmbientColor(glm::vec3&& color) noexcept;\n void setDiffuseColor(glm::vec3&& color) noexcept;\n void setSpecularColor(glm::vec3&& color) noexcept;\n const glm::vec3& getPosition() const noexcept;\n const glm::vec3& getAmbientColor() const noexcept;\n const glm::vec3& getDiffuseColor() const noexcept;\n const glm::vec3& getSpecularColor() const noexcept;\n private:\n glm::vec3 m_position{};\n glm::vec3 m_ambient_color{ 1.0f, 1.0f, 1.0f };\n glm::vec3 m_diffuse_color{ 1.0f, 1.0f, 1.0f };\n glm::vec3 m_specular_color{ 1.0f, 1.0f, 1.0f };\n };\n\n}\n" }, { "alpha_fraction": 0.7289248108863831, "alphanum_fraction": 0.7329631447792053, "avg_line_length": 27.71014404296875, "blob_id": "c7c3cbdcdb32edb90c4801ac7eb2b5962ea9e0ea", "content_id": "e704a86f7f18ce839f43d74e53813e8b1863c8a2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1981, "license_type": "permissive", "max_line_length": 91, "num_lines": 69, "path": "/Sources/Core/Resources/Material.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Material.hpp\"\n\nvoid Core::Resources::Material::setShininess(const float shininess) noexcept\n{\n m_shininess = shininess;\n}\n\nvoid Core::Resources::Material::setAmbientTextureName(std::string&& texture_name) noexcept\n{\n m_ambient_texture_name = std::move(texture_name);\n}\n\nvoid Core::Resources::Material::setDiffuseTextureName(std::string&& texture_name) noexcept\n{\n m_diffuse_texture_name = std::move(texture_name);\n}\n\nvoid Core::Resources::Material::setSpecularTextureName(std::string&& texture_name) noexcept\n{\n m_specular_texture_name = texture_name;\n}\n\nfloat Core::Resources::Material::getShininess() const noexcept\n{\n return m_shininess;\n}\n\nstd::string_view Core::Resources::Material::getAmbientTextureName() const noexcept\n{\n return m_ambient_texture_name;\n}\n\nstd::string_view Core::Resources::Material::getDiffuseTextureName() const noexcept\n{\n return m_diffuse_texture_name;\n}\n\nstd::string_view Core::Resources::Material::getSpecularTextureName() const noexcept\n{\n return m_specular_texture_name;\n}\n\nbool Core::Resources::Material::load(std::string_view material_path) noexcept\n{\n /*\n * At the moment, materials are loading in the 'ModelLoader',\n * with special function of the 'ResourceManager' for it.\n * Maybe later, material will be able to load without 'ModelLoader'.\n */\n\n return false;\n}\n" }, { "alpha_fraction": 0.5962913036346436, "alphanum_fraction": 0.6132895350456238, "avg_line_length": 50.77000045776367, "blob_id": "d3e1a0b0465ab6bb8f2e8d957769cd0a62fb0265", "content_id": "4adfec959030109a1a19130c0545e7b56fffbbf9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5177, "license_type": 
"permissive", "max_line_length": 164, "num_lines": 100, "path": "/Dependencies/LibXL/Include/IBookT.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_IBOOKT_H\n#define LIBXL_IBOOKT_H\n\n#include <stddef.h>\n#include \"setup.h\"\n#include \"enum.h\"\n\nnamespace libxl {\n\n template<class TCHAR> struct ISheetT;\n template<class TCHAR> struct IFormatT;\n template<class TCHAR> struct IFontT;\n\n template<class TCHAR>\n struct IBookT\n {\n virtual bool XLAPIENTRY load(const TCHAR* filename, const TCHAR* tempFile = 0) = 0;\n virtual bool XLAPIENTRY loadSheet(const TCHAR* filename, int sheetIndex, const TCHAR* tempFile = 0) = 0;\n virtual bool XLAPIENTRY loadPartially(const TCHAR* filename, int sheetIndex, int firstRow, int lastRow, const TCHAR* tempFile = 0) = 0;\n virtual bool XLAPIENTRY loadWithoutEmptyCells(const TCHAR* filename) = 0;\n virtual bool XLAPIENTRY loadInfo(const TCHAR* filename) = 0;\n\n virtual bool XLAPIENTRY save(const TCHAR* filename, bool useTempFile = false) = 0;\n\n virtual bool XLAPIENTRY loadRaw(const char* data, unsigned size, int sheetIndex = -1, int firstRow = -1, int lastRow = -1) = 0;\n virtual bool XLAPIENTRY saveRaw(const char** data, unsigned* size) = 0;\n\n virtual ISheetT<TCHAR>* XLAPIENTRY addSheet(const TCHAR* name, ISheetT<TCHAR>* initSheet = 0) = 0;\n virtual ISheetT<TCHAR>* XLAPIENTRY insertSheet(int index, const TCHAR* name, ISheetT<TCHAR>* initSheet = 0) = 0;\n virtual ISheetT<TCHAR>* XLAPIENTRY getSheet(int index) const = 0;\n virtual const TCHAR* XLAPIENTRY getSheetName(int index) const = 0;\n virtual SheetType XLAPIENTRY sheetType(int index) const = 0;\n virtual bool XLAPIENTRY moveSheet(int srcIndex, int dstIndex) = 0;\n virtual bool XLAPIENTRY delSheet(int index) = 0;\n virtual int XLAPIENTRY sheetCount() const = 0;\n\n virtual IFormatT<TCHAR>* XLAPIENTRY addFormat(IFormatT<TCHAR>* initFormat = 0) = 0;\n virtual IFontT<TCHAR>* XLAPIENTRY addFont(IFontT<TCHAR>* initFont = 0) = 0;\n virtual int XLAPIENTRY addCustomNumFormat(const TCHAR* customNumFormat) = 0;\n virtual const TCHAR* XLAPIENTRY customNumFormat(int fmt) = 0;\n\n virtual IFormatT<TCHAR>* XLAPIENTRY format(int index) = 0;\n virtual int XLAPIENTRY formatSize() = 0;\n\n virtual IFontT<TCHAR>* XLAPIENTRY font(int index) = 0;\n virtual int XLAPIENTRY fontSize() = 0;\n\n virtual double XLAPIENTRY datePack(int year, int month, int day, int hour = 0, int min = 0, int sec = 0, int msec = 0) = 0;\n virtual bool XLAPIENTRY dateUnpack(double value, int* year, int* month, int* day, int* hour = 0, int* min = 0, int* sec = 0, int* msec = 0) = 0;\n\n virtual Color XLAPIENTRY colorPack(int red, int green, int blue) = 0;\n virtual void XLAPIENTRY colorUnpack(Color color, int* red, int* green, int* blue) = 0;\n\n virtual int XLAPIENTRY activeSheet() const = 0;\n virtual void XLAPIENTRY setActiveSheet(int index) = 0;\n\n virtual int XLAPIENTRY pictureSize() const = 0;\n virtual PictureType XLAPIENTRY getPicture(int index, const char** data, unsigned* size) const = 0;\n\n virtual int XLAPIENTRY addPicture(const TCHAR* filename) = 0;\n virtual int XLAPIENTRY addPicture2(const char* data, unsigned size) = 0;\n virtual int XLAPIENTRY addPictureAsLink(const TCHAR* filename, bool insert = false) = 0;\n\n virtual const TCHAR* XLAPIENTRY defaultFont(int* fontSize) = 0;\n virtual void XLAPIENTRY setDefaultFont(const TCHAR* fontName, int fontSize) = 0;\n\n virtual bool XLAPIENTRY refR1C1() const = 0;\n virtual void XLAPIENTRY setRefR1C1(bool refR1C1 = true) = 0;\n\n virtual 
void XLAPIENTRY setKey(const TCHAR* name, const TCHAR* key) = 0;\n\n virtual bool XLAPIENTRY rgbMode() = 0;\n virtual void XLAPIENTRY setRgbMode(bool rgbMode = true) = 0;\n\n virtual int XLAPIENTRY version() const = 0;\n virtual int XLAPIENTRY biffVersion() const = 0;\n\n virtual bool XLAPIENTRY isDate1904() const = 0;\n virtual void XLAPIENTRY setDate1904(bool date1904 = true) = 0;\n\n virtual bool XLAPIENTRY isTemplate() const = 0;\n virtual void XLAPIENTRY setTemplate(bool tmpl = true) = 0;\n\n virtual bool XLAPIENTRY setLocale(const char* locale) = 0;\n virtual const char* XLAPIENTRY errorMessage() const = 0;\n\n virtual void XLAPIENTRY release() = 0;\n\n virtual ~IBookT() {}\n };\n\n}\n\nextern \"C\" XLAPI libxl::IBookT<char>* XLAPIENTRY xlCreateBookA();\nextern \"C\" XLAPI libxl::IBookT<wchar_t>* XLAPIENTRY xlCreateBookW();\n\nextern \"C\" XLAPI libxl::IBookT<char>* XLAPIENTRY xlCreateXMLBookA();\nextern \"C\" XLAPI libxl::IBookT<wchar_t>* XLAPIENTRY xlCreateXMLBookW();\n\n#endif\n" }, { "alpha_fraction": 0.6956155300140381, "alphanum_fraction": 0.7023608684539795, "avg_line_length": 27.238094329833984, "blob_id": "6cfa1fdd135be784af400f5a0f2a189b9c74b93a", "content_id": "2a0ae08fe8ec81b3ae34d26c79b554447552c45a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1186, "license_type": "permissive", "max_line_length": 107, "num_lines": 42, "path": "/Sources/Core/Managers/PlayerManager.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n\n#include \"IManager.hpp\"\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_player_manager\n #define g_player_manager Core::Managers::PlayerManager::getInstance()\n#endif // g_player_manager\n\nnamespace Core::Managers\n{\n\n class PlayerManager : public IManager<PlayerManager>, public Helpers::Holders::Singleton<PlayerManager>\n {\n public:\n void initialize();\n std::string getEmail() const noexcept;\n std::string getPassword() const noexcept;\n private:\n std::string m_email{};\n std::string m_password{};\n };\n\n}\n" }, { "alpha_fraction": 0.5689922571182251, "alphanum_fraction": 0.5813953280448914, "avg_line_length": 30.463415145874023, "blob_id": "ca24943618f71cf376b90cf1e763683f24763ca9", "content_id": "2406bcd95f417deeb8315a250d284b8c36a4f1f2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1290, "license_type": "permissive", "max_line_length": 80, "num_lines": 41, "path": "/Dependencies/LibXL/Include/IFontT.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_IFONTT_H\n#define LIBXL_IFONTT_H\n\n#include \"setup.h\"\n#include \"enum.h\"\n\nnamespace libxl {\n\n template<class TCHAR>\n struct IFontT\n {\n virtual int XLAPIENTRY size() const = 0;\n virtual void XLAPIENTRY setSize(int size) = 
0;\n\n virtual bool XLAPIENTRY italic() const = 0;\n virtual void XLAPIENTRY setItalic(bool italic = true) = 0;\n\n virtual bool XLAPIENTRY strikeOut() const = 0;\n virtual void XLAPIENTRY setStrikeOut(bool strikeOut = true) = 0;\n\n virtual Color XLAPIENTRY color() const = 0;\n virtual void XLAPIENTRY setColor(Color color) = 0;\n\n virtual bool XLAPIENTRY bold() const = 0;\n virtual void XLAPIENTRY setBold(bool bold = true) = 0;\n\n virtual Script XLAPIENTRY script() const = 0;\n virtual void XLAPIENTRY setScript(Script script) = 0;\n\n virtual Underline XLAPIENTRY underline() const = 0;\n virtual void XLAPIENTRY setUnderline(Underline underline) = 0;\n\n virtual const TCHAR* XLAPIENTRY name() const = 0;\n virtual bool XLAPIENTRY setName(const TCHAR* name) = 0;\n\n virtual ~IFontT() {}\n };\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6872246861457825, "alphanum_fraction": 0.6960352659225464, "avg_line_length": 31.35714340209961, "blob_id": "9b83cfe40016169ed0dd75583017717159517bcd", "content_id": "04079921d9c71bcd7fb8ced3fc246ad8a92c160d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 908, "license_type": "permissive", "max_line_length": 107, "num_lines": 28, "path": "/Dependencies/LibXL/Include/IAutoFilterT.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_IAUTOFILTER_H\n#define LIBXL_IAUTOFILTER_H\n\n#include \"setup.h\"\n\nnamespace libxl\n{\n template<class TCHAR> struct IFilterColumnT;\n\n template<class TCHAR>\n struct IAutoFilterT\n {\n virtual bool XLAPIENTRY getRef(int* rowFirst, int* rowLast, int* colFirst, int* colLast) = 0;\n virtual void XLAPIENTRY setRef(int rowFirst, int rowLast, int colFirst, int colLast) = 0;\n\n virtual IFilterColumnT<TCHAR>* XLAPIENTRY column(int colId) = 0;\n\n virtual int XLAPIENTRY columnSize() const = 0;\n virtual IFilterColumnT<TCHAR>* XLAPIENTRY columnByIndex(int index) = 0;\n\n virtual bool XLAPIENTRY getSortRange(int* rowFirst, int* rowLast, int* colFirst, int* colLast) = 0;\n\n virtual bool XLAPIENTRY getSort(int* columnIndex, bool* descending) = 0;\n virtual bool XLAPIENTRY setSort(int columnIndex, bool descending = false) = 0;\n };\n}\n\n#endif\n\n\n" }, { "alpha_fraction": 0.7342767119407654, "alphanum_fraction": 0.7342767119407654, "avg_line_length": 23.5, "blob_id": "0dbecdc1258a55d5885df792a55481fe875afb26", "content_id": "8b32c49d96f7b0eb6b081f91ceb1e5bde8a5739e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 636, "license_type": "permissive", "max_line_length": 43, "num_lines": 26, "path": "/Resources/Resources.ini", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "[Models]\n\t#triangle = Models/Triangle.obj\n\t#ground = Models/Ground/Ground.obj\n\t#cube = Models/Cube/Cube.obj\n\t#tree = Models/Tree/Tree.obj\n\tcrate = Models/Crate/Crate.obj\n\n[Maps]\n\ttest = Maps/TestMap.xml\n\n[Sounds]\t\n\tbackground = Sounds/Background.wav\n\t\n[Textures]\t\n\tsky = Textures/Sky.jpg\n\tground = Textures/Ground.jpg\n\t\n[VertexShaders]\n\tbase_shader_vert = Shaders/BaseShader.vert\n\tfont_shader_vert = Shaders/FontShader.vert\n\ttest_shader_vert = Shaders/TestShader.vert\n\n[FragmentShaders]\t\t\n\tbase_shader_frag = Shaders/BaseShader.frag\n\tfont_shader_frag = Shaders/FontShader.frag\n\ttest_shader_frag = Shaders/TestShader.frag" }, { "alpha_fraction": 0.7085953950881958, "alphanum_fraction": 0.714185893535614, "avg_line_length": 
30.755556106567383, "blob_id": "08e6dff27b1f6dd793462892fec80f89e7b7cb03", "content_id": "70804d593959d51d206cddfdf634bc8247198bbe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 113, "num_lines": 45, "path": "/Sources/Core/Managers/LocaleManager.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <unordered_map>\n#include <libxl.h>\n\n#include \"IManager.hpp\"\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_locale_manager\n #define g_locale_manager Core::Managers::LocaleManager::getInstance()\n#endif // g_locale_manager\n\nnamespace Core::Managers\n{\n\n class LocaleManager : public IManager<LocaleManager>, public Helpers::Holders::Singleton<LocaleManager>\n {\n public:\n void initialize();\n std::string getString(const std::string& key) const noexcept;\n private:\n void findNecessaryColIndexesInSheet(libxl::Sheet* sheet, int& key_index, int& data_index) const noexcept;\n void readAllStringFromSheet(libxl::Sheet* sheet, int key_index, int data_index) noexcept;\n private:\n std::unordered_map<std::string, std::string> m_strings{};\n };\n\n}\n\n\n" }, { "alpha_fraction": 0.6633869409561157, "alphanum_fraction": 0.670009434223175, "avg_line_length": 33.542484283447266, "blob_id": "7ccfc08dd6dd206a695c218231bdac10993019af", "content_id": "bdd23d479fc037829f0e63af32b0464ba8b62c4e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5285, "license_type": "permissive", "max_line_length": 142, "num_lines": 153, "path": "/Sources/Core/Graphics/SceneGraph/Builders.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Builders.hpp\"\n\n#include \"Node.hpp\"\n#include \"Mesh.hpp\"\n#include \"Scene.hpp\"\n#include \"Texture2D.hpp\"\n#include \"../../Resources/Image.hpp\"\n#include \"../../Managers/ResourceManager.hpp\"\n\n#pragma region NodeBuilder\n\nCore::Graphics::SceneGraph::NodeSPtr Core::Graphics::SceneGraph::NodeBuilder::build(Resources::ModelSPtr model, Scene& scene)\n{\n if (model)\n {\n const std::vector<Resources::Model::Object> objects 
= model->getObjects();\n if (objects.size() == 1u)\n {\n const Resources::Model::Object& object = objects.front();\n return createNode(object, scene);\n }\n\n if (!objects.empty())\n {\n auto group = std::make_shared<Node>();\n for (const Resources::Model::Object& object : objects)\n {\n NodeSPtr node = createNode(object, scene);\n group->addChild(node);\n }\n return group;\n }\n }\n\n return nullptr;\n}\n\nCore::Graphics::SceneGraph::NodeSPtr Core::Graphics::SceneGraph::NodeBuilder::createNode(const Resources::Model::Object& object, Scene& scene)\n{\n auto node = std::make_shared<Node>();\n node->setMesh(MeshBuilder::build(object, scene));\n return node;\n}\n\n#pragma endregion\n\n#pragma region MeshBuilder\n\nconstexpr std::uint8_t COUNT_ELEMS_IN_POS = 3u;\nconstexpr std::uint8_t COUNT_ELEMS_IN_NORMAL = 3u;\nconstexpr std::uint8_t COUNT_ELEMS_IN_TEXTURE_COORDINATE = 2u;\nconstexpr std::uint8_t COUNT_ELEMS_IN_VERTEX = COUNT_ELEMS_IN_POS + COUNT_ELEMS_IN_NORMAL + COUNT_ELEMS_IN_TEXTURE_COORDINATE;\n\nconst Core::Graphics::SceneGraph::Mesh* Core::Graphics::SceneGraph::MeshBuilder::build(const Resources::Model::Object& object, Scene& scene)\n{\n const std::string& mesh_id = object.getName();\n if (!scene.isExistsMesh(mesh_id)) \n {\n const std::vector<unsigned int>& indices = object.getIndices();\n const std::vector<Resources::Model::Vertex>& vertices = object.getVertices();\n\n std::vector<float> output_elements{};\n output_elements.reserve(getCountElements(vertices.size()));\n for (const auto index : indices)\n {\n const Resources::Model::Vertex& vertex = vertices.at(index);\n const glm::vec3& normal = vertex.getNormal();\n const glm::vec3& position = vertex.getPosition();\n const glm::vec2& texture_coordinate = vertex.getTextureCoordinate();\n\n // Necessary order of the following operations\n addVec3ToElements(output_elements, position);\n addVec3ToElements(output_elements, normal);\n addVec2ToElements(output_elements, texture_coordinate);\n }\n\n Texture2D texture;\n auto material = g_resource_manager.getResource<Resources::Material>(STR(object.getMaterialName()));\n if (material)\n {\n std::string_view diffuse_texture_name = material->getDiffuseTextureName();\n if (!diffuse_texture_name.empty())\n {\n std::shared_ptr<Resources::Image> image = g_resource_manager.getResource<Resources::Image>(STR(diffuse_texture_name));\n texture = TextureBuilder::build(image);\n }\n }\n\n const unsigned int count_elements = getCountElements(output_elements);\n scene.addMesh(mesh_id, Mesh(texture, std::move(output_elements), count_elements));\n }\n\n return scene.getMeshByID(mesh_id);\n}\n\nunsigned Core::Graphics::SceneGraph::MeshBuilder::getCountElements(std::size_t count_vertices) noexcept\n{\n return count_vertices * COUNT_ELEMS_IN_VERTEX;\n}\n\nunsigned int Core::Graphics::SceneGraph::MeshBuilder::getCountElements(std::vector<float>& elements) noexcept\n{\n return elements.size() / COUNT_ELEMS_IN_VERTEX;\n}\n\nvoid Core::Graphics::SceneGraph::MeshBuilder::addVec3ToElements(std::vector<float>& elements, const glm::vec3& vector)\n{\n elements.push_back(vector.x);\n elements.push_back(vector.y);\n elements.push_back(vector.z);\n}\n\nvoid Core::Graphics::SceneGraph::MeshBuilder::addVec2ToElements(std::vector<float>& elements, const glm::vec2& vector)\n{\n elements.push_back(vector.x);\n elements.push_back(vector.y);\n}\n\n#pragma endregion\n\n#pragma region TextureBuilder\n\nCore::Graphics::SceneGraph::Texture2D Core::Graphics::SceneGraph::TextureBuilder::build(Resources::ImageSPtr image)\n{\n if 
(image)\n {\n const std::uint16_t texture_width = image->getWidth();\n const std::uint16_t texture_height = image->getHeight();\n const unsigned char* texture_data = image->getData();\n return Texture2D(texture_width, texture_height, texture_data);\n }\n\n return Texture2D{};\n}\n\n#pragma endregion\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6697965860366821, "avg_line_length": 16.75, "blob_id": "58f35f1076dc9c90d498f20c93537fcf5d0d85dc", "content_id": "3d75ac40ca63294886178ad0bcfd905f6997763c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 639, "license_type": "permissive", "max_line_length": 73, "num_lines": 36, "path": "/Dependencies/LibXL/Include/setup.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_SETUP_CPP_H\n#define LIBXL_SETUP_CPP_H\n\n#ifdef LIBXL_STDCALL\n #define LIBXL_CALLING __stdcall\n#else\n #define LIBXL_CALLING __cdecl\n#endif\n\n#if !defined(LIBXL_STATIC) && (defined(_MSC_VER) || defined(__WATCOMC__))\n\n #ifdef libxl_EXPORTS\n #define XLAPI __declspec(dllexport)\n #else\n #define XLAPI __declspec(dllimport)\n #endif\n\n #define XLAPIENTRY LIBXL_CALLING\n\n#else\n\n #ifdef libxl_EXPORTS\n #define XLAPI __attribute__ ((visibility (\"default\")))\n #else\n #define XLAPI\n #endif\n\n #if defined(__MINGW32__)\n #define XLAPIENTRY LIBXL_CALLING\n #else\n #define XLAPIENTRY\n #endif\n\n#endif\n\n#endif\n" }, { "alpha_fraction": 0.7089678645133972, "alphanum_fraction": 0.7157360315322876, "avg_line_length": 33.764705657958984, "blob_id": "433b4c8c60296075e8c5bad392282a7b348e525f", "content_id": "1ec293d401eb9536217313acff2c3e6e6a8a6cb2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1182, "license_type": "permissive", "max_line_length": 77, "num_lines": 34, "path": "/Sources/Game/Identifiers.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string_view>\n\nnamespace Game::ID\n{\n\n inline const std::string_view g_base_vert_shader_id = \"base_shader_vert\";\n inline const std::string_view g_font_vert_shader_id = \"font_shader_vert\";\n inline const std::string_view g_test_vert_shader_id = \"test_shader_vert\";\n\n inline const std::string_view g_base_frag_shader_id = \"base_shader_frag\";\n inline const std::string_view g_font_frag_shader_id = \"font_shader_frag\";\n inline const std::string_view g_test_frag_shader_id = \"test_shader_frag\";\n\n inline const char* g_background_sound = \"background\";\n\n}\n" }, { "alpha_fraction": 0.6111341118812561, "alphanum_fraction": 0.6183775067329407, "avg_line_length": 37.349205017089844, "blob_id": "ae6abaa1624f38fce2e8e27bda0b956e4a370756", "content_id": "29ea4eff23127beb0f7d9d0bfa14d49a1a6e0e67", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, 
"is_vendor": false, "language": "C++", "length_bytes": 4832, "license_type": "permissive", "max_line_length": 136, "num_lines": 126, "path": "/Sources/Core/Resources/Loaders/MapLoader.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"MapLoader.hpp\"\n\n#include \"TextLoader.hpp\"\n#include \"../Text.hpp\"\n#include \"../../Helpers/Debug.hpp\"\n\nconstexpr std::size_t X_COORDINATE = 0u;\nconstexpr std::size_t Y_COORDINATE = 1u;\nconstexpr std::size_t Z_COORDINATE = 2u;\n\nbool Core::Resources::Loaders::MapLoader::load(Map& map, std::string_view map_file_path) noexcept\n{\n try\n {\n bpt::ptree map_data{};\n read_xml(STR(map_file_path), map_data);\n for (const bpt::ptree::value_type& xml_value : map_data.get_child(\"map\"))\n {\n if (xml_value.first == \"dimensions\")\n {\n loadDimensions(map, xml_value);\n }\n if (xml_value.first == \"models\")\n {\n loadObjectModelsIdentifiers(map, xml_value.second);\n }\n if (xml_value.first == \"objects\")\n {\n for (const bpt::ptree::value_type& xml_objects : xml_value.second)\n {\n if (xml_objects.first == \"trees\")\n {\n std::vector<Map::MapObject> tree_objects = getSetMapObjectsFromXML(xml_objects.second);\n map.setTreeObjects(std::move(tree_objects));\n }\n if (xml_objects.first == \"houses\")\n {\n std::vector<Map::MapObject> house_objects = getSetMapObjectsFromXML(xml_objects.second);\n map.setHouseObjects(std::move(house_objects));\n }\n }\n }\n }\n }\n catch (const bpt::xml_parser_error&)\n {\n LOG_ERROR(\"Map file: '\" + STR(map_file_path) + \"' was not read.\");\n return false;\n }\n\n return true;\n}\n\nvoid Core::Resources::Loaders::MapLoader::loadDimensions(Map& map, const bpt::ptree::value_type& xml_value)\n{\n bg::box2f_t dimensions{};\n dimensions.min_corner().set<X_COORDINATE>(xml_value.second.get<float>(\"<xmlattr>.xmin\", 0.0f));\n dimensions.min_corner().set<Y_COORDINATE>(xml_value.second.get<float>(\"<xmlattr>.ymin\", 0.0f));\n dimensions.max_corner().set<X_COORDINATE>(xml_value.second.get<float>(\"<xmlattr>.xmax\", 0.0f));\n dimensions.max_corner().set<Y_COORDINATE>(xml_value.second.get<float>(\"<xmlattr>.ymax\", 0.0f));\n map.setDimensions(dimensions);\n}\n\nvoid Core::Resources::Loaders::MapLoader::loadObjectModelsIdentifiers(Map& map, const bpt::ptree& xml_models)\n{\n for (const bpt::ptree::value_type& xml_model : xml_models)\n {\n const std::string& objects_id = xml_model.second.get<std::string>(\"<xmlattr>.objects\", \"\");\n if (objects_id == \"trees\")\n {\n map.setTreeModelsName(xml_model.second.get<std::string>(\"<xmlattr>.model\", \"\"));\n }\n if (objects_id == \"houses\")\n {\n map.setHouseModelsName(xml_model.second.get<std::string>(\"<xmlattr>.model\", \"\"));\n }\n }\n}\n\nstd::vector<Core::Resources::Map::MapObject> Core::Resources::Loaders::MapLoader::getSetMapObjectsFromXML(const bpt::ptree& xml_objects)\n{\n 
std::vector<Map::MapObject> map_objects{};\n map_objects.reserve(xml_objects.size());\n for (const bpt::ptree::value_type& xml_object : xml_objects)\n {\n Map::MapObject map_object = getMapObjectFromXML(xml_object);\n map_objects.push_back(std::move(map_object));\n }\n\n return map_objects;\n}\n\nCore::Resources::Map::MapObject Core::Resources::Loaders::MapLoader::getMapObjectFromXML(const bpt::ptree::value_type& xml_object)\n{\n Map::MapObject map_object{};\n\n map_object.setScale(xml_object.second.get<float>(\"<xmlattr>.scale\", 1.0f));\n map_object.setXRotation(xml_object.second.get<float>(\"<xmlattr>.xrotation\", 0.0f));\n map_object.setYRotation(xml_object.second.get<float>(\"<xmlattr>.yrotation\", 0.0f));\n map_object.setZRotation(xml_object.second.get<float>(\"<xmlattr>.zrotation\", 0.0f));\n\n bg::point3f_t position{};\n position.set<X_COORDINATE>(xml_object.second.get<float>(\"<xmlattr>.x\", 0.0f));\n position.set<Y_COORDINATE>(xml_object.second.get<float>(\"<xmlattr>.y\", 0.0f));\n position.set<Z_COORDINATE>(xml_object.second.get<float>(\"<xmlattr>.z\", 0.0f));\n map_object.setPosition(position);\n\n return map_object;\n}\n" }, { "alpha_fraction": 0.675564706325531, "alphanum_fraction": 0.6848049163818359, "avg_line_length": 25.324323654174805, "blob_id": "50cff859c4d90ec986d59acc11740a52850cbb78", "content_id": "9eeab8705c2693c7a7ece6d169d684ae1de23a6f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 974, "license_type": "permissive", "max_line_length": 53, "num_lines": 37, "path": "/Dependencies/LibXL/Include/libxl.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_CPP_H\n#define LIBXL_CPP_H\n\n#define LIBXL_VERSION 0x03080500\n\n#include \"IBookT.h\"\n#include \"ISheetT.h\"\n#include \"IFormatT.h\"\n#include \"IFontT.h\"\n#include \"IAutoFilterT.h\"\n#include \"IFilterColumnT.h\"\n\nnamespace libxl {\n\n #ifdef _UNICODE\n typedef IBookT<wchar_t> Book;\n typedef ISheetT<wchar_t> Sheet;\n typedef IFormatT<wchar_t> Format;\n typedef IFontT<wchar_t> Font;\n typedef IAutoFilterT<wchar_t> AutoFilter;\n typedef IFilterColumnT<wchar_t> FilterColumn;\n #define xlCreateBook xlCreateBookW\n #define xlCreateXMLBook xlCreateXMLBookW\n #else\n typedef IBookT<char> Book;\n typedef ISheetT<char> Sheet;\n typedef IFormatT<char> Format;\n typedef IFontT<char> Font;\n typedef IAutoFilterT<char> AutoFilter;\n typedef IFilterColumnT<char> FilterColumn;\n #define xlCreateBook xlCreateBookA\n #define xlCreateXMLBook xlCreateXMLBookA\n #endif\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6765415072441101, "alphanum_fraction": 0.6834309101104736, "avg_line_length": 30.21505355834961, "blob_id": "15998deb0d10999f326fcb272a207d675d52408c", "content_id": "8fc2536bbd3130c0351e8e48ea8d6a64827caef6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2903, "license_type": "permissive", "max_line_length": 97, "num_lines": 93, "path": "/Sources/Core/Graphics/Renderer.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is 
distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Renderer.hpp\"\n\n#include \"Shader.hpp\"\n#include \"SceneGraph/Node.hpp\"\n#include \"SceneGraph/Scene.hpp\"\n#include \"SceneGraph/Light.hpp\"\n#include \"Input/MouseState.hpp\"\n#include \"Input/KeyboardState.hpp\"\n\nvoid Core::Graphics::Renderer::draw(const SceneGraph::Scene& scene, const std::string& shader_id)\n{\n m_frame_calculator.onChangingFrame();\n\n m_basic_shader = scene.getShaderByID(shader_id);\n m_basic_shader->use();\n\n m_basic_shader->setUniformMatrix4x4f(\"vs_un_view\", m_camera.getViewMatrix());\n m_basic_shader->setUniformMatrix4x4f(\"vs_un_projection\", m_camera.getProjectionMatrix());\n\n const SceneGraph::Light& light = scene.getLight();\n m_basic_shader->setUniformVector3f(\"fs_un_light_color\", light.getAmbientColor());\n\n updateCamera();\n drawNode(scene.getRootNode());\n}\n\nvoid Core::Graphics::Renderer::drawNode(SceneGraph::NodeSPtr node)\n{\n if (node->isExistChildren())\n {\n for (auto it = node->childrenBegin(); it != node->childrenEnd(); ++it)\n {\n SceneGraph::NodeSPtr child_node = *it;\n drawNode(child_node);\n }\n }\n\n const glm::mat4x4& transformation = node->getTransformation();\n m_basic_shader->setUniformMatrix4x4f(\"vs_un_model\", transformation);\n m_basic_shader->setUniformi(\"fs_un_texture\", 0);\n if (const SceneGraph::Mesh* mesh = node->getMesh())\n {\n mesh->draw();\n }\n}\n\nvoid Core::Graphics::Renderer::updateCamera() noexcept\n{\n m_camera.setSpeed(m_frame_calculator.getDeltaTime());\n\n const Input::KeyboardState& keyboard_state = g_keyboard_state.getInstance();\n if (keyboard_state.isPressedKeyW())\n {\n m_camera.moveForward();\n }\n if (keyboard_state.isPressedKeyS())\n {\n m_camera.moveBackward();\n }\n if (keyboard_state.isPressedKeyA())\n {\n m_camera.moveLeft();\n }\n if (keyboard_state.isPressedKeyD())\n {\n m_camera.moveRight();\n }\n\n Input::MouseState& mouse = g_mouse_state.getInstance();\n const int xDisplacementOffset = mouse.getAndUnsetXDisplacementOffset();\n const int yDisplacementOffset = mouse.getAndUnsetYDisplacementOffset();\n if (xDisplacementOffset != 0 || yDisplacementOffset != 0)\n {\n m_camera.turn(xDisplacementOffset, yDisplacementOffset);\n }\n}\n" }, { "alpha_fraction": 0.7247524857521057, "alphanum_fraction": 0.7326732873916626, "avg_line_length": 30.5625, "blob_id": "fb1f75b57b4a35a9c27b13f9736f712125b101f9", "content_id": "8be9606cc2952cd2b58ca9a2cf9fb5d24e0faa5f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1010, "license_type": "permissive", "max_line_length": 109, "num_lines": 32, "path": "/Sources/Game/Application.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n 
* limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Application.hpp\"\n\n#include \"Identifiers.hpp\"\n#include \"../Core/Managers/SoundManager.hpp\"\n\nGame::Application::Application(int window_width, int window_height, const std::string& window_title) noexcept\n : m_window(window_width, window_height, window_title)\n{\n}\n\nvoid Game::Application::start()\n{\n g_sound_manager.playMusic(ID::g_background_sound);\n m_window.show();\n}\n" }, { "alpha_fraction": 0.7153846025466919, "alphanum_fraction": 0.7197802066802979, "avg_line_length": 34.686275482177734, "blob_id": "fde76dbf5e6d8500a70a504989163252c2e16c25", "content_id": "aa2a7fd08cb244bad1b051042dc16aada8a7b087", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1820, "license_type": "permissive", "max_line_length": 128, "num_lines": 51, "path": "/Sources/Core/Managers/ConfigurationManager.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <cstdint>\n\n#include \"IManager.hpp\"\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_configuration_manager\n #define g_configuration_manager Core::Managers::ConfigurationManager::getInstance()\n#endif // g_configuration_manager\n\nnamespace Core::Managers \n{\n\n class ConfigurationManager : public IManager<ConfigurationManager>, public Helpers::Holders::Singleton<ConfigurationManager>\n {\n public:\n void initialize();\n std::string getCurrentLanguage() const noexcept;\n std::string_view getMapsPath() const noexcept;\n std::string_view getModelsPath() const noexcept;\n std::string_view getShadersPath() const noexcept;\n std::string_view getResourcesPath() const noexcept;\n std::string_view getLocalesConfigurationFilename() const noexcept;\n std::string_view getPlayerConfigurationFilename() const noexcept;\n std::string_view getResourcesConfigurationFilename() const noexcept;\n private:\n std::string m_current_language{};\n std::string m_player_file_configuration_path{};\n std::string m_locales_file_configuration_path{};\n std::string m_resources_file_configuration_path{};\n };\n\n}\n" }, { "alpha_fraction": 0.6832981109619141, "alphanum_fraction": 0.7107822299003601, "avg_line_length": 83.46428680419922, "blob_id": "4d24e55bd47648637b66d056d1e96346df3e2a63", "content_id": "489ef5605dce1e9614b13fee3495505195fd4bd3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9460, "license_type": "permissive", "max_line_length": 171, "num_lines": 112, "path": "/Dependencies/LibXL/Include/enum.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_ENUM_CPP_H\n#define LIBXL_ENUM_CPP_H\n\nnamespace libxl {\n\n enum Color {COLOR_BLACK = 8, COLOR_WHITE, COLOR_RED, COLOR_BRIGHTGREEN, COLOR_BLUE, COLOR_YELLOW, 
COLOR_PINK, COLOR_TURQUOISE, COLOR_DARKRED,\n COLOR_GREEN, COLOR_DARKBLUE, COLOR_DARKYELLOW, COLOR_VIOLET, COLOR_TEAL, COLOR_GRAY25, COLOR_GRAY50, COLOR_PERIWINKLE_CF,\n COLOR_PLUM_CF, COLOR_IVORY_CF, COLOR_LIGHTTURQUOISE_CF, COLOR_DARKPURPLE_CF, COLOR_CORAL_CF, COLOR_OCEANBLUE_CF, COLOR_ICEBLUE_CF,\n COLOR_DARKBLUE_CL, COLOR_PINK_CL, COLOR_YELLOW_CL, COLOR_TURQUOISE_CL, COLOR_VIOLET_CL, COLOR_DARKRED_CL, COLOR_TEAL_CL,\n COLOR_BLUE_CL, COLOR_SKYBLUE, COLOR_LIGHTTURQUOISE, COLOR_LIGHTGREEN, COLOR_LIGHTYELLOW, COLOR_PALEBLUE, COLOR_ROSE, COLOR_LAVENDER,\n COLOR_TAN, COLOR_LIGHTBLUE, COLOR_AQUA, COLOR_LIME, COLOR_GOLD, COLOR_LIGHTORANGE, COLOR_ORANGE, COLOR_BLUEGRAY, COLOR_GRAY40,\n COLOR_DARKTEAL, COLOR_SEAGREEN, COLOR_DARKGREEN, COLOR_OLIVEGREEN, COLOR_BROWN, COLOR_PLUM, COLOR_INDIGO, COLOR_GRAY80,\n COLOR_DEFAULT_FOREGROUND = 0x0040, COLOR_DEFAULT_BACKGROUND = 0x0041, COLOR_TOOLTIP = 0x0051, COLOR_NONE = 0x7F, COLOR_AUTO = 0x7FFF};\n\n enum NumFormat {NUMFORMAT_GENERAL, NUMFORMAT_NUMBER, NUMFORMAT_NUMBER_D2, NUMFORMAT_NUMBER_SEP, NUMFORMAT_NUMBER_SEP_D2,\n NUMFORMAT_CURRENCY_NEGBRA, NUMFORMAT_CURRENCY_NEGBRARED, NUMFORMAT_CURRENCY_D2_NEGBRA, NUMFORMAT_CURRENCY_D2_NEGBRARED,\n NUMFORMAT_PERCENT, NUMFORMAT_PERCENT_D2, NUMFORMAT_SCIENTIFIC_D2, NUMFORMAT_FRACTION_ONEDIG, NUMFORMAT_FRACTION_TWODIG,\n NUMFORMAT_DATE, NUMFORMAT_CUSTOM_D_MON_YY, NUMFORMAT_CUSTOM_D_MON, NUMFORMAT_CUSTOM_MON_YY,\n NUMFORMAT_CUSTOM_HMM_AM, NUMFORMAT_CUSTOM_HMMSS_AM, NUMFORMAT_CUSTOM_HMM, NUMFORMAT_CUSTOM_HMMSS,\n NUMFORMAT_CUSTOM_MDYYYY_HMM,\n NUMFORMAT_NUMBER_SEP_NEGBRA = 37, NUMFORMAT_NUMBER_SEP_NEGBRARED,\n NUMFORMAT_NUMBER_D2_SEP_NEGBRA, NUMFORMAT_NUMBER_D2_SEP_NEGBRARED, NUMFORMAT_ACCOUNT, NUMFORMAT_ACCOUNTCUR,\n NUMFORMAT_ACCOUNT_D2, NUMFORMAT_ACCOUNT_D2_CUR, NUMFORMAT_CUSTOM_MMSS, NUMFORMAT_CUSTOM_H0MMSS,\n NUMFORMAT_CUSTOM_MMSS0, NUMFORMAT_CUSTOM_000P0E_PLUS0, NUMFORMAT_TEXT};\n\n enum AlignH {ALIGNH_GENERAL, ALIGNH_LEFT, ALIGNH_CENTER, ALIGNH_RIGHT, ALIGNH_FILL, ALIGNH_JUSTIFY, ALIGNH_MERGE, ALIGNH_DISTRIBUTED};\n enum AlignV {ALIGNV_TOP, ALIGNV_CENTER, ALIGNV_BOTTOM, ALIGNV_JUSTIFY, ALIGNV_DISTRIBUTED};\n\n enum BorderStyle {BORDERSTYLE_NONE, BORDERSTYLE_THIN, BORDERSTYLE_MEDIUM, BORDERSTYLE_DASHED, BORDERSTYLE_DOTTED, BORDERSTYLE_THICK,\n BORDERSTYLE_DOUBLE, BORDERSTYLE_HAIR, BORDERSTYLE_MEDIUMDASHED, BORDERSTYLE_DASHDOT, BORDERSTYLE_MEDIUMDASHDOT,\n BORDERSTYLE_DASHDOTDOT, BORDERSTYLE_MEDIUMDASHDOTDOT, BORDERSTYLE_SLANTDASHDOT};\n\n enum BorderDiagonal {BORDERDIAGONAL_NONE, BORDERDIAGONAL_DOWN, BORDERDIAGONAL_UP, BORDERDIAGONAL_BOTH};\n\n enum FillPattern {FILLPATTERN_NONE, FILLPATTERN_SOLID, FILLPATTERN_GRAY50, FILLPATTERN_GRAY75, FILLPATTERN_GRAY25,\n FILLPATTERN_HORSTRIPE, FILLPATTERN_VERSTRIPE, FILLPATTERN_REVDIAGSTRIPE, FILLPATTERN_DIAGSTRIPE,\n FILLPATTERN_DIAGCROSSHATCH, FILLPATTERN_THICKDIAGCROSSHATCH, FILLPATTERN_THINHORSTRIPE, FILLPATTERN_THINVERSTRIPE,\n FILLPATTERN_THINREVDIAGSTRIPE, FILLPATTERN_THINDIAGSTRIPE, FILLPATTERN_THINHORCROSSHATCH, FILLPATTERN_THINDIAGCROSSHATCH,\n FILLPATTERN_GRAY12P5, FILLPATTERN_GRAY6P25};\n\n enum Script {SCRIPT_NORMAL, SCRIPT_SUPER, SCRIPT_SUB};\n enum Underline {UNDERLINE_NONE, UNDERLINE_SINGLE, UNDERLINE_DOUBLE, UNDERLINE_SINGLEACC = 0x21, UNDERLINE_DOUBLEACC = 0x22};\n\n enum Paper {PAPER_DEFAULT, PAPER_LETTER, PAPER_LETTERSMALL, PAPER_TABLOID, PAPER_LEDGER, PAPER_LEGAL, PAPER_STATEMENT, PAPER_EXECUTIVE, PAPER_A3,\n PAPER_A4, PAPER_A4SMALL, PAPER_A5, PAPER_B4, PAPER_B5, PAPER_FOLIO, PAPER_QUATRO, PAPER_10x14, PAPER_10x17, PAPER_NOTE, PAPER_ENVELOPE_9,\n 
PAPER_ENVELOPE_10, PAPER_ENVELOPE_11, PAPER_ENVELOPE_12, PAPER_ENVELOPE_14, PAPER_C_SIZE, PAPER_D_SIZE, PAPER_E_SIZE, PAPER_ENVELOPE_DL,\n PAPER_ENVELOPE_C5, PAPER_ENVELOPE_C3, PAPER_ENVELOPE_C4, PAPER_ENVELOPE_C6, PAPER_ENVELOPE_C65, PAPER_ENVELOPE_B4, PAPER_ENVELOPE_B5,\n PAPER_ENVELOPE_B6, PAPER_ENVELOPE, PAPER_ENVELOPE_MONARCH, PAPER_US_ENVELOPE, PAPER_FANFOLD, PAPER_GERMAN_STD_FANFOLD,\n PAPER_GERMAN_LEGAL_FANFOLD, PAPER_B4_ISO, PAPER_JAPANESE_POSTCARD, PAPER_9x11, PAPER_10x11, PAPER_15x11, PAPER_ENVELOPE_INVITE,\n PAPER_US_LETTER_EXTRA = 50, PAPER_US_LEGAL_EXTRA, PAPER_US_TABLOID_EXTRA, PAPER_A4_EXTRA, PAPER_LETTER_TRANSVERSE, PAPER_A4_TRANSVERSE,\n PAPER_LETTER_EXTRA_TRANSVERSE, PAPER_SUPERA, PAPER_SUPERB, PAPER_US_LETTER_PLUS, PAPER_A4_PLUS, PAPER_A5_TRANSVERSE, PAPER_B5_TRANSVERSE,\n PAPER_A3_EXTRA, PAPER_A5_EXTRA, PAPER_B5_EXTRA, PAPER_A2, PAPER_A3_TRANSVERSE, PAPER_A3_EXTRA_TRANSVERSE, PAPER_JAPANESE_DOUBLE_POSTCARD,\n PAPER_A6, PAPER_JAPANESE_ENVELOPE_KAKU2, PAPER_JAPANESE_ENVELOPE_KAKU3, PAPER_JAPANESE_ENVELOPE_CHOU3, PAPER_JAPANESE_ENVELOPE_CHOU4,\n PAPER_LETTER_ROTATED, PAPER_A3_ROTATED, PAPER_A4_ROTATED, PAPER_A5_ROTATED, PAPER_B4_ROTATED, PAPER_B5_ROTATED,\n PAPER_JAPANESE_POSTCARD_ROTATED, PAPER_DOUBLE_JAPANESE_POSTCARD_ROTATED, PAPER_A6_ROTATED, PAPER_JAPANESE_ENVELOPE_KAKU2_ROTATED,\n PAPER_JAPANESE_ENVELOPE_KAKU3_ROTATED, PAPER_JAPANESE_ENVELOPE_CHOU3_ROTATED, PAPER_JAPANESE_ENVELOPE_CHOU4_ROTATED, PAPER_B6,\n PAPER_B6_ROTATED, PAPER_12x11, PAPER_JAPANESE_ENVELOPE_YOU4, PAPER_JAPANESE_ENVELOPE_YOU4_ROTATED, PAPER_PRC16K, PAPER_PRC32K,\n PAPER_PRC32K_BIG, PAPER_PRC_ENVELOPE1, PAPER_PRC_ENVELOPE2, PAPER_PRC_ENVELOPE3, PAPER_PRC_ENVELOPE4, PAPER_PRC_ENVELOPE5,\n PAPER_PRC_ENVELOPE6, PAPER_PRC_ENVELOPE7, PAPER_PRC_ENVELOPE8, PAPER_PRC_ENVELOPE9, PAPER_PRC_ENVELOPE10, PAPER_PRC16K_ROTATED,\n PAPER_PRC32K_ROTATED, PAPER_PRC32KBIG_ROTATED, PAPER_PRC_ENVELOPE1_ROTATED, PAPER_PRC_ENVELOPE2_ROTATED, PAPER_PRC_ENVELOPE3_ROTATED,\n PAPER_PRC_ENVELOPE4_ROTATED, PAPER_PRC_ENVELOPE5_ROTATED, PAPER_PRC_ENVELOPE6_ROTATED, PAPER_PRC_ENVELOPE7_ROTATED,\n PAPER_PRC_ENVELOPE8_ROTATED, PAPER_PRC_ENVELOPE9_ROTATED, PAPER_PRC_ENVELOPE10_ROTATED};\n\n enum SheetType { SHEETTYPE_SHEET, SHEETTYPE_CHART, SHEETTYPE_UNKNOWN };\n\n enum CellType {CELLTYPE_EMPTY, CELLTYPE_NUMBER, CELLTYPE_STRING, CELLTYPE_BOOLEAN, CELLTYPE_BLANK, CELLTYPE_ERROR};\n\n enum ErrorType {ERRORTYPE_NULL = 0x00, ERRORTYPE_DIV_0 = 0x07, ERRORTYPE_VALUE = 0x0F, ERRORTYPE_REF = 0x17, ERRORTYPE_NAME = 0x1D,\n ERRORTYPE_NUM = 0x24, ERRORTYPE_NA = 0x2A, ERRORTYPE_NOERROR = 0xFF};\n\n enum PictureType {PICTURETYPE_PNG, PICTURETYPE_JPEG, PICTURETYPE_GIF, PICTURETYPE_WMF, PICTURETYPE_DIB, PICTURETYPE_EMF,\n PICTURETYPE_PICT, PICTURETYPE_TIFF, PICTURETYPE_ERROR = 0xFF};\n\n enum SheetState {SHEETSTATE_VISIBLE, SHEETSTATE_HIDDEN, SHEETSTATE_VERYHIDDEN};\n\n enum Scope {SCOPE_UNDEFINED = -2, SCOPE_WORKBOOK = -1};\n\n enum Position {POSITION_MOVE_AND_SIZE, POSITION_ONLY_MOVE, POSITION_ABSOLUTE};\n\n enum Operator { OPERATOR_EQUAL, OPERATOR_GREATER_THAN, OPERATOR_GREATER_THAN_OR_EQUAL, OPERATOR_LESS_THAN, OPERATOR_LESS_THAN_OR_EQUAL, OPERATOR_NOT_EQUAL };\n\n enum Filter { FILTER_VALUE, FILTER_TOP10, FILTER_CUSTOM, FILTER_DYNAMIC, FILTER_COLOR, FILTER_ICON, FILTER_EXT, FILTER_NOT_SET };\n\n enum IgnoredError { IERR_NO_ERROR = 0, IERR_EVAL_ERROR = 1, IERR_EMPTY_CELLREF = 2, IERR_NUMBER_STORED_AS_TEXT = 4, IERR_INCONSIST_RANGE = 8,\n IERR_INCONSIST_FMLA = 16, IERR_TWODIG_TEXTYEAR = 32, IERR_UNLOCK_FMLA = 64, IERR_DATA_VALIDATION = 128 
};\n\n enum EnhancedProtection { PROT_DEFAULT = -1, PROT_ALL = 0, PROT_OBJECTS = 1, PROT_SCENARIOS = 2, PROT_FORMAT_CELLS = 4, PROT_FORMAT_COLUMNS = 8, PROT_FORMAT_ROWS = 16,\n PROT_INSERT_COLUMNS = 32, PROT_INSERT_ROWS = 64, PROT_INSERT_HYPERLINKS = 128, PROT_DELETE_COLUMNS = 256, PROT_DELETE_ROWS = 512,\n PROT_SEL_LOCKED_CELLS = 1024, PROT_SORT = 2048, PROT_AUTOFILTER = 4096, PROT_PIVOTTABLES = 8192, PROT_SEL_UNLOCKED_CELLS = 16384 };\n\n enum DataValidationType { VALIDATION_TYPE_NONE, VALIDATION_TYPE_WHOLE, VALIDATION_TYPE_DECIMAL, VALIDATION_TYPE_LIST,\n VALIDATION_TYPE_DATE, VALIDATION_TYPE_TIME, VALIDATION_TYPE_TEXTLENGTH, VALIDATION_TYPE_CUSTOM };\n\n enum DataValidationOperator { VALIDATION_OP_BETWEEN, VALIDATION_OP_NOTBETWEEN, VALIDATION_OP_EQUAL, VALIDATION_OP_NOTEQUAL,\n VALIDATION_OP_LESSTHAN, VALIDATION_OP_LESSTHANOREQUAL, VALIDATION_OP_GREATERTHAN, VALIDATION_OP_GREATERTHANOREQUAL };\n\n enum DataValidationErrorStyle { VALIDATION_ERRSTYLE_STOP, VALIDATION_ERRSTYLE_WARNING, VALIDATION_ERRSTYLE_INFORMATION };\n\n inline IgnoredError operator|(IgnoredError left, IgnoredError right)\n {\n return (IgnoredError)((int)left | (int)right);\n }\n\n inline EnhancedProtection operator|(EnhancedProtection left, EnhancedProtection right)\n {\n return (EnhancedProtection)((int)left | (int)right);\n }\n\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.6623826026916504, "alphanum_fraction": 0.6717745661735535, "avg_line_length": 30.609375, "blob_id": "5078219c7187033ce9b7cc6c459df4b080b2d5d2", "content_id": "fbf99201de5b362dabc3a6d8276ed54b44a9a8cd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2023, "license_type": "permissive", "max_line_length": 75, "num_lines": 64, "path": "/Sources/Core/Graphics/SceneGraph/Node.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <deque>\n#include <optional>\n#include <glm/vec4.hpp>\n#include <glm/mat4x4.hpp>\n\n#include \"Mesh.hpp\"\n#include \"../../Helpers/Macroses.hpp\"\n\nnamespace Core::Graphics::SceneGraph \n{\n\n FWD_DECL_SMART_PTRS_FOR_CLASS(Node)\n\n class Node\n {\n public:\n template<typename... Args>\n void emplaceChild(Args... 
args);\n void addChild(NodeSPtr node) noexcept;\n void removeChild(NodeSPtr node) noexcept;\n void rotateByX(float degrees) noexcept;\n void rotateByY(float degrees) noexcept;\n void rotateByZ(float degrees) noexcept;\n void scale(float value) noexcept;\n void move(const glm::vec3& position) noexcept;\n void move(const glm::vec4& position) noexcept;\n bool isExistChildren() const noexcept;\n bool isExitChild(NodeSPtr node) const noexcept;\n void setMesh(const Mesh* mesh) noexcept;\n const Mesh* getMesh() const noexcept;\n std::deque<NodeSPtr>::iterator childrenBegin() noexcept;\n std::deque<NodeSPtr>::iterator childrenEnd() noexcept;\n const glm::mat4x4& getTransformation() const noexcept;\n private:\n std::deque<NodeSPtr> m_children{};\n glm::mat4x4 m_transformation{ 1.0f };\n const Mesh* m_mesh = nullptr;\n };\n\n template<typename... Args>\n void Node::emplaceChild(Args... args)\n {\n m_children.emplace_back(std::forward<Args>(args)...);\n }\n\n}\n" }, { "alpha_fraction": 0.576073169708252, "alphanum_fraction": 0.587067723274231, "avg_line_length": 22.61610984802246, "blob_id": "517f14afb6c3ff28e215dc1b45a008fab9f1edbc", "content_id": "039c6ff73480170830c64ff05967c867e00ac6fb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 27559, "license_type": "permissive", "max_line_length": 148, "num_lines": 1167, "path": "/Dependencies/OBJLoader/Include/OBJLoader.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "// OBJ_Loader.h - A Single Header OBJ Model Loader\n\n#pragma once\n\n// Iostream - STD I/O Library\n#include <iostream>\n\n// Vector - STD Vector/Array Library\n#include <vector>\n\n// String - STD String Library\n#include <string>\n\n// fStream - STD File I/O Library\n#include <fstream>\n\n// Math.h - STD math Library\n#include <math.h>\n\n// Print progress to console while loading (large models)\n//#define OBJL_CONSOLE_OUTPUT\n\n// Namespace: OBJL\n//\n// Description: The namespace that holds eveyrthing that\n//\tis needed and used for the OBJ Model Loader\nnamespace objl\n{\n\t// Structure: Vector2\n\t//\n\t// Description: A 2D Vector that Holds Positional Data\n\tstruct Vector2\n\t{\n\t\t// Default Constructor\n\t\tVector2()\n\t\t{\n\t\t\tX = 0.0f;\n\t\t\tY = 0.0f;\n\t\t}\n\t\t// Variable Set Constructor\n\t\tVector2(float X_, float Y_)\n\t\t{\n\t\t\tX = X_;\n\t\t\tY = Y_;\n\t\t}\n\t\t// Bool Equals Operator Overload\n\t\tbool operator==(const Vector2& other) const\n\t\t{\n\t\t\treturn (this->X == other.X && this->Y == other.Y);\n\t\t}\n\t\t// Bool Not Equals Operator Overload\n\t\tbool operator!=(const Vector2& other) const\n\t\t{\n\t\t\treturn !(this->X == other.X && this->Y == other.Y);\n\t\t}\n\t\t// Addition Operator Overload\n\t\tVector2 operator+(const Vector2& right) const\n\t\t{\n\t\t\treturn Vector2(this->X + right.X, this->Y + right.Y);\n\t\t}\n\t\t// Subtraction Operator Overload\n\t\tVector2 operator-(const Vector2& right) const\n\t\t{\n\t\t\treturn Vector2(this->X - right.X, this->Y - right.Y);\n\t\t}\n\t\t// Float Multiplication Operator Overload\n\t\tVector2 operator*(const float& other) const\n\t\t{\n\t\t\treturn Vector2(this->X *other, this->Y * other);\n\t\t}\n\n\t\t// Positional Variables\n\t\tfloat X;\n\t\tfloat Y;\n\t};\n\n\t// Structure: Vector3\n\t//\n\t// Description: A 3D Vector that Holds Positional Data\n\tstruct Vector3\n\t{\n\t\t// Default Constructor\n\t\tVector3()\n\t\t{\n\t\t\tX = 0.0f;\n\t\t\tY = 0.0f;\n\t\t\tZ = 0.0f;\n\t\t}\n\t\t// Variable Set 
Constructor\n\t\tVector3(float X_, float Y_, float Z_)\n\t\t{\n\t\t\tX = X_;\n\t\t\tY = Y_;\n\t\t\tZ = Z_;\n\t\t}\n\t\t// Bool Equals Operator Overload\n\t\tbool operator==(const Vector3& other) const\n\t\t{\n\t\t\treturn (this->X == other.X && this->Y == other.Y && this->Z == other.Z);\n\t\t}\n\t\t// Bool Not Equals Operator Overload\n\t\tbool operator!=(const Vector3& other) const\n\t\t{\n\t\t\treturn !(this->X == other.X && this->Y == other.Y && this->Z == other.Z);\n\t\t}\n\t\t// Addition Operator Overload\n\t\tVector3 operator+(const Vector3& right) const\n\t\t{\n\t\t\treturn Vector3(this->X + right.X, this->Y + right.Y, this->Z + right.Z);\n\t\t}\n\t\t// Subtraction Operator Overload\n\t\tVector3 operator-(const Vector3& right) const\n\t\t{\n\t\t\treturn Vector3(this->X - right.X, this->Y - right.Y, this->Z - right.Z);\n\t\t}\n\t\t// Float Multiplication Operator Overload\n\t\tVector3 operator*(const float& other) const\n\t\t{\n\t\t\treturn Vector3(this->X * other, this->Y * other, this->Z * other);\n\t\t}\n\t\t// Float Division Operator Overload\n\t\tVector3 operator/(const float& other) const\n\t\t{\n\t\t\treturn Vector3(this->X / other, this->Y / other, this->Z / other);\n\t\t}\n\n\t\t// Positional Variables\n\t\tfloat X;\n\t\tfloat Y;\n\t\tfloat Z;\n\t};\n\n\t// Structure: Vertex\n\t//\n\t// Description: Model Vertex object that holds\n\t//\ta Position, Normal, and Texture Coordinate\n\tstruct Vertex\n\t{\n\t\t// Position Vector\n\t\tVector3 Position;\n\n\t\t// Normal Vector\n\t\tVector3 Normal;\n\n\t\t// Texture Coordinate Vector\n\t\tVector2 TextureCoordinate;\n\t};\n\n\tstruct Material\n\t{\n\t\tMaterial()\n\t\t{\n\t\t\tname;\n\t\t\tNs = 0.0f;\n\t\t\tNi = 0.0f;\n\t\t\td = 0.0f;\n\t\t\tillum = 0;\n\t\t}\n\n\t\t// Material Name\n\t\tstd::string name;\n\t\t// Ambient Color\n\t\tVector3 Ka;\n\t\t// Diffuse Color\n\t\tVector3 Kd;\n\t\t// Specular Color\n\t\tVector3 Ks;\n\t\t// Specular Exponent\n\t\tfloat Ns;\n\t\t// Optical Density\n\t\tfloat Ni;\n\t\t// Dissolve\n\t\tfloat d;\n\t\t// Illumination\n\t\tint illum;\n\t\t// Ambient Texture Map\n\t\tstd::string map_Ka;\n\t\t// Diffuse Texture Map\n\t\tstd::string map_Kd;\n\t\t// Specular Texture Map\n\t\tstd::string map_Ks;\n\t\t// Specular Hightlight Map\n\t\tstd::string map_Ns;\n\t\t// Alpha Texture Map\n\t\tstd::string map_d;\n\t\t// Bump Map\n\t\tstd::string map_bump;\n\t};\n\n\t// Structure: Mesh\n\t//\n\t// Description: A Simple Mesh Object that holds\n\t//\ta name, a vertex list, and an index list\n\tstruct Mesh\n\t{\n\t\t// Default Constructor\n\t\tMesh()\n\t\t{\n\n\t\t}\n\t\t// Variable Set Constructor\n\t\tMesh(std::vector<Vertex>& _Vertices, std::vector<unsigned int>& _Indices)\n\t\t{\n\t\t\tVertices = _Vertices;\n\t\t\tIndices = _Indices;\n\t\t}\n\t\t// Mesh Name\n\t\tstd::string MeshName;\n\t\t// Vertex List\n\t\tstd::vector<Vertex> Vertices;\n\t\t// Index List\n\t\tstd::vector<unsigned int> Indices;\n\n\t\t// Material\n\t\tMaterial MeshMaterial;\n\t};\n\n\t// Namespace: Math\n\t//\n\t// Description: The namespace that holds all of the math\n\t//\tfunctions need for OBJL\n\tnamespace math\n\t{\n\t\t// Vector3 Cross Product\n\t\tVector3 CrossV3(const Vector3 a, const Vector3 b)\n\t\t{\n\t\t\treturn Vector3(a.Y * b.Z - a.Z * b.Y,\n\t\t\t\ta.Z * b.X - a.X * b.Z,\n\t\t\t\ta.X * b.Y - a.Y * b.X);\n\t\t}\n\n\t\t// Vector3 Magnitude Calculation\n\t\tfloat MagnitudeV3(const Vector3 in)\n\t\t{\n\t\t\treturn (sqrtf(powf(in.X, 2) + powf(in.Y, 2) + powf(in.Z, 2)));\n\t\t}\n\n\t\t// Vector3 DotProduct\n\t\tfloat DotV3(const Vector3 a, 
const Vector3 b)\n\t\t{\n\t\t\treturn (a.X * b.X) + (a.Y * b.Y) + (a.Z * b.Z);\n\t\t}\n\n\t\t// Angle between 2 Vector3 Objects\n\t\tfloat AngleBetweenV3(const Vector3 a, const Vector3 b)\n\t\t{\n\t\t\tfloat angle = DotV3(a, b);\n\t\t\tangle /= (MagnitudeV3(a) * MagnitudeV3(b));\n\t\t\treturn angle = acosf(angle);\n\t\t}\n\n\t\t// Projection Calculation of a onto b\n\t\tVector3 ProjV3(const Vector3 a, const Vector3 b)\n\t\t{\n\t\t\tVector3 bn = b / MagnitudeV3(b);\n\t\t\treturn bn * DotV3(a, bn);\n\t\t}\n\t}\n\n\t// Namespace: Algorithm\n\t//\n\t// Description: The namespace that holds all of the\n\t// Algorithms needed for OBJL\n\tnamespace algorithm\n\t{\n\t\t// Vector3 Multiplication Opertor Overload\n\t\tVector3 operator*(const float& left, const Vector3& right)\n\t\t{\n\t\t\treturn Vector3(right.X * left, right.Y * left, right.Z * left);\n\t\t}\n\n\t\t// A test to see if P1 is on the same side as P2 of a line segment ab\n\t\tbool SameSide(Vector3 p1, Vector3 p2, Vector3 a, Vector3 b)\n\t\t{\n\t\t\tVector3 cp1 = math::CrossV3(b - a, p1 - a);\n\t\t\tVector3 cp2 = math::CrossV3(b - a, p2 - a);\n\n\t\t\tif (math::DotV3(cp1, cp2) >= 0)\n\t\t\t\treturn true;\n\t\t\telse\n\t\t\t\treturn false;\n\t\t}\n\n\t\t// Generate a cross produect normal for a triangle\n\t\tVector3 GenTriNormal(Vector3 t1, Vector3 t2, Vector3 t3)\n\t\t{\n\t\t\tVector3 u = t2 - t1;\n\t\t\tVector3 v = t3 - t1;\n\n\t\t\tVector3 normal = math::CrossV3(u,v);\n\n\t\t\treturn normal;\n\t\t}\n\n\t\t// Check to see if a Vector3 Point is within a 3 Vector3 Triangle\n\t\tbool inTriangle(Vector3 point, Vector3 tri1, Vector3 tri2, Vector3 tri3)\n\t\t{\n\t\t\t// Test to see if it is within an infinite prism that the triangle outlines.\n\t\t\tbool within_tri_prisim = SameSide(point, tri1, tri2, tri3) && SameSide(point, tri2, tri1, tri3)\n\t\t\t\t&& SameSide(point, tri3, tri1, tri2);\n\n\t\t\t// If it isn't it will never be on the triangle\n\t\t\tif (!within_tri_prisim)\n\t\t\t\treturn false;\n\n\t\t\t// Calulate Triangle's Normal\n\t\t\tVector3 n = GenTriNormal(tri1, tri2, tri3);\n\n\t\t\t// Project the point onto this normal\n\t\t\tVector3 proj = math::ProjV3(point, n);\n\n\t\t\t// If the distance from the triangle to the point is 0\n\t\t\t//\tit lies on the triangle\n\t\t\tif (math::MagnitudeV3(proj) == 0)\n\t\t\t\treturn true;\n\t\t\telse\n\t\t\t\treturn false;\n\t\t}\n\n\t\t// Split a String into a string array at a given token\n\t\tinline void split(const std::string &in,\n\t\t\tstd::vector<std::string> &out,\n\t\t\tstd::string token)\n\t\t{\n\t\t\tout.clear();\n\n\t\t\tstd::string temp;\n\n\t\t\tfor (int i = 0; i < int(in.size()); i++)\n\t\t\t{\n\t\t\t\tstd::string test = in.substr(i, token.size());\n\n\t\t\t\tif (test == token)\n\t\t\t\t{\n\t\t\t\t\tif (!temp.empty())\n\t\t\t\t\t{\n\t\t\t\t\t\tout.push_back(temp);\n\t\t\t\t\t\ttemp.clear();\n\t\t\t\t\t\ti += (int)token.size() - 1;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tout.push_back(\"\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse if (i + token.size() >= in.size())\n\t\t\t\t{\n\t\t\t\t\ttemp += in.substr(i, token.size());\n\t\t\t\t\tout.push_back(temp);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\ttemp += in[i];\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Get tail of string after first token and possibly following spaces\n\t\tinline std::string tail(const std::string &in)\n\t\t{\n\t\t\tsize_t token_start = in.find_first_not_of(\" \\t\");\n\t\t\tsize_t space_start = in.find_first_of(\" \\t\", token_start);\n\t\t\tsize_t tail_start = 
in.find_first_not_of(\" \\t\", space_start);\n\t\t\tsize_t tail_end = in.find_last_not_of(\" \\t\");\n\t\t\tif (tail_start != std::string::npos && tail_end != std::string::npos)\n\t\t\t{\n\t\t\t\treturn in.substr(tail_start, tail_end - tail_start + 1);\n\t\t\t}\n\t\t\telse if (tail_start != std::string::npos)\n\t\t\t{\n\t\t\t\treturn in.substr(tail_start);\n\t\t\t}\n\t\t\treturn \"\";\n\t\t}\n\n\t\t// Get first token of string\n\t\tinline std::string firstToken(const std::string &in)\n\t\t{\n\t\t\tif (!in.empty())\n\t\t\t{\n\t\t\t\tsize_t token_start = in.find_first_not_of(\" \\t\");\n\t\t\t\tsize_t token_end = in.find_first_of(\" \\t\", token_start);\n\t\t\t\tif (token_start != std::string::npos && token_end != std::string::npos)\n\t\t\t\t{\n\t\t\t\t\treturn in.substr(token_start, token_end - token_start);\n\t\t\t\t}\n\t\t\t\telse if (token_start != std::string::npos)\n\t\t\t\t{\n\t\t\t\t\treturn in.substr(token_start);\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"\";\n\t\t}\n\n\t\t// Get element at given index position\n\t\ttemplate <class T>\n\t\tinline const T & getElement(const std::vector<T> &elements, std::string &index)\n\t\t{\n\t\t\tint idx = std::stoi(index);\n\t\t\tif (idx < 0)\n\t\t\t\tidx = int(elements.size()) + idx;\n\t\t\telse\n\t\t\t\tidx--;\n\t\t\treturn elements[idx];\n\t\t}\n\t}\n\n\t// Class: Loader\n\t//\n\t// Description: The OBJ Model Loader\n\tclass Loader\n\t{\n\tpublic:\n\t\t// Default Constructor\n\t\tLoader()\n\t\t{\n\n\t\t}\n\t\t~Loader()\n\t\t{\n\t\t\tLoadedMeshes.clear();\n\t\t}\n\n\t\t// Load a file into the loader\n\t\t//\n\t\t// If file is loaded return true\n\t\t//\n\t\t// If the file is unable to be found\n\t\t// or unable to be loaded return false\n\t\tbool LoadFile(std::string Path)\n\t\t{\n\t\t\t// If the file is not an .obj file return false\n\t\t\tif (Path.substr(Path.size() - 4, 4) != \".obj\")\n\t\t\t\treturn false;\n\n\n\t\t\tstd::ifstream file(Path);\n\n\t\t\tif (!file.is_open())\n\t\t\t\treturn false;\n\n\t\t\tLoadedMeshes.clear();\n\t\t\tLoadedVertices.clear();\n\t\t\tLoadedIndices.clear();\n\n\t\t\tstd::vector<Vector3> Positions;\n\t\t\tstd::vector<Vector2> TCoords;\n\t\t\tstd::vector<Vector3> Normals;\n\n\t\t\tstd::vector<Vertex> Vertices;\n\t\t\tstd::vector<unsigned int> Indices;\n\n\t\t\tstd::vector<std::string> MeshMatNames;\n\n\t\t\tbool listening = false;\n\t\t\tstd::string meshname;\n\n\t\t\tMesh tempMesh;\n\n\t\t\t#ifdef OBJL_CONSOLE_OUTPUT\n\t\t\tconst unsigned int outputEveryNth = 1000;\n\t\t\tunsigned int outputIndicator = outputEveryNth;\n\t\t\t#endif\n\n\t\t\tstd::string curline;\n\t\t\twhile (std::getline(file, curline))\n\t\t\t{\n\t\t\t\t#ifdef OBJL_CONSOLE_OUTPUT\n\t\t\t\tif ((outputIndicator = ((outputIndicator + 1) % outputEveryNth)) == 1)\n\t\t\t\t{\n\t\t\t\t\tif (!meshname.empty())\n\t\t\t\t\t{\n\t\t\t\t\t\tstd::cout\n\t\t\t\t\t\t\t<< \"\\r- \" << meshname\n\t\t\t\t\t\t\t<< \"\\t| vertices > \" << Positions.size()\n\t\t\t\t\t\t\t<< \"\\t| texcoords > \" << TCoords.size()\n\t\t\t\t\t\t\t<< \"\\t| normals > \" << Normals.size()\n\t\t\t\t\t\t\t<< \"\\t| triangles > \" << (Vertices.size() / 3)\n\t\t\t\t\t\t\t<< (!MeshMatNames.empty() ? 
\"\\t| material: \" + MeshMatNames.back() : \"\");\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t#endif\n\n\t\t\t\t// Generate a Mesh Object or Prepare for an object to be created\n\t\t\t\tif (algorithm::firstToken(curline) == \"o\" || algorithm::firstToken(curline) == \"g\" || curline[0] == 'g')\n\t\t\t\t{\n\t\t\t\t\tif (!listening)\n\t\t\t\t\t{\n\t\t\t\t\t\tlistening = true;\n\n\t\t\t\t\t\tif (algorithm::firstToken(curline) == \"o\" || algorithm::firstToken(curline) == \"g\")\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tmeshname = algorithm::tail(curline);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tmeshname = \"unnamed\";\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t// Generate the mesh to put into the array\n\n\t\t\t\t\t\tif (!Indices.empty() && !Vertices.empty())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Create Mesh\n\t\t\t\t\t\t\ttempMesh = Mesh(Vertices, Indices);\n\t\t\t\t\t\t\ttempMesh.MeshName = meshname;\n\n\t\t\t\t\t\t\t// Insert Mesh\n\t\t\t\t\t\t\tLoadedMeshes.push_back(tempMesh);\n\n\t\t\t\t\t\t\t// Cleanup\n\t\t\t\t\t\t\tVertices.clear();\n\t\t\t\t\t\t\tIndices.clear();\n\t\t\t\t\t\t\tmeshname.clear();\n\n\t\t\t\t\t\t\tmeshname = algorithm::tail(curline);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (algorithm::firstToken(curline) == \"o\" || algorithm::firstToken(curline) == \"g\")\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tmeshname = algorithm::tail(curline);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tmeshname = \"unnamed\";\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t#ifdef OBJL_CONSOLE_OUTPUT\n\t\t\t\t\tstd::cout << std::endl;\n\t\t\t\t\toutputIndicator = 0;\n\t\t\t\t\t#endif\n\t\t\t\t}\n\t\t\t\t// Generate a Vertex Position\n\t\t\t\tif (algorithm::firstToken(curline) == \"v\")\n\t\t\t\t{\n\t\t\t\t\tstd::vector<std::string> spos;\n\t\t\t\t\tVector3 vpos;\n\t\t\t\t\talgorithm::split(algorithm::tail(curline), spos, \" \");\n\n\t\t\t\t\tvpos.X = std::stof(spos[0]);\n\t\t\t\t\tvpos.Y = std::stof(spos[1]);\n\t\t\t\t\tvpos.Z = std::stof(spos[2]);\n\n\t\t\t\t\tPositions.push_back(vpos);\n\t\t\t\t}\n\t\t\t\t// Generate a Vertex Texture Coordinate\n\t\t\t\tif (algorithm::firstToken(curline) == \"vt\")\n\t\t\t\t{\n\t\t\t\t\tstd::vector<std::string> stex;\n\t\t\t\t\tVector2 vtex;\n\t\t\t\t\talgorithm::split(algorithm::tail(curline), stex, \" \");\n\n\t\t\t\t\tvtex.X = std::stof(stex[0]);\n\t\t\t\t\tvtex.Y = std::stof(stex[1]);\n\n\t\t\t\t\tTCoords.push_back(vtex);\n\t\t\t\t}\n\t\t\t\t// Generate a Vertex Normal;\n\t\t\t\tif (algorithm::firstToken(curline) == \"vn\")\n\t\t\t\t{\n\t\t\t\t\tstd::vector<std::string> snor;\n\t\t\t\t\tVector3 vnor;\n\t\t\t\t\talgorithm::split(algorithm::tail(curline), snor, \" \");\n\n\t\t\t\t\tvnor.X = std::stof(snor[0]);\n\t\t\t\t\tvnor.Y = std::stof(snor[1]);\n\t\t\t\t\tvnor.Z = std::stof(snor[2]);\n\n\t\t\t\t\tNormals.push_back(vnor);\n\t\t\t\t}\n\t\t\t\t// Generate a Face (vertices & indices)\n\t\t\t\tif (algorithm::firstToken(curline) == \"f\")\n\t\t\t\t{\n\t\t\t\t\t// Generate the vertices\n\t\t\t\t\tstd::vector<Vertex> vVerts;\n\t\t\t\t\tGenVerticesFromRawOBJ(vVerts, Positions, TCoords, Normals, curline);\n\n\t\t\t\t\t// Add Vertices\n\t\t\t\t\tfor (int i = 0; i < int(vVerts.size()); i++)\n\t\t\t\t\t{\n\t\t\t\t\t\tVertices.push_back(vVerts[i]);\n\n\t\t\t\t\t\tLoadedVertices.push_back(vVerts[i]);\n\t\t\t\t\t}\n\n\t\t\t\t\tstd::vector<unsigned int> iIndices;\n\n\t\t\t\t\tVertexTriangluation(iIndices, vVerts);\n\n\t\t\t\t\t// Add Indices\n\t\t\t\t\tfor (int i = 0; i < int(iIndices.size()); 
i++)\n\t\t\t\t\t{\n\t\t\t\t\t\tunsigned int indnum = (unsigned int)((Vertices.size()) - vVerts.size()) + iIndices[i];\n\t\t\t\t\t\tIndices.push_back(indnum);\n\n\t\t\t\t\t\tindnum = (unsigned int)((LoadedVertices.size()) - vVerts.size()) + iIndices[i];\n\t\t\t\t\t\tLoadedIndices.push_back(indnum);\n\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Get Mesh Material Name\n\t\t\t\tif (algorithm::firstToken(curline) == \"usemtl\")\n\t\t\t\t{\n\t\t\t\t\tMeshMatNames.push_back(algorithm::tail(curline));\n\n\t\t\t\t\t// Create new Mesh, if Material changes within a group\n\t\t\t\t\tif (!Indices.empty() && !Vertices.empty())\n\t\t\t\t\t{\n\t\t\t\t\t\t// Create Mesh\n\t\t\t\t\t\ttempMesh = Mesh(Vertices, Indices);\n\t\t\t\t\t\ttempMesh.MeshName = meshname;\n\t\t\t\t\t\tint i = 2;\n\t\t\t\t\t\twhile (true) {\n\t\t\t\t\t\t\ttempMesh.MeshName = meshname + \"_\" + std::to_string(i);\n\n\t\t\t\t\t\t\t// keep incrementing the suffix until the name no longer collides\n\t\t\t\t\t\t\t// (a bare 'continue' here only skipped one iteration of the inner\n\t\t\t\t\t\t\t// for-loop, so duplicate names were never actually re-checked)\n\t\t\t\t\t\t\tbool name_taken = false;\n\t\t\t\t\t\t\tfor (auto &m : LoadedMeshes)\n\t\t\t\t\t\t\t\tif (m.MeshName == tempMesh.MeshName)\n\t\t\t\t\t\t\t\t\tname_taken = true;\n\t\t\t\t\t\t\tif (!name_taken)\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\ti++;\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Insert Mesh\n\t\t\t\t\t\tLoadedMeshes.push_back(tempMesh);\n\n\t\t\t\t\t\t// Cleanup\n\t\t\t\t\t\tVertices.clear();\n\t\t\t\t\t\tIndices.clear();\n\t\t\t\t\t}\n\n\t\t\t\t\t#ifdef OBJL_CONSOLE_OUTPUT\n\t\t\t\t\toutputIndicator = 0;\n\t\t\t\t\t#endif\n\t\t\t\t}\n\t\t\t\t// Load Materials\n\t\t\t\tif (algorithm::firstToken(curline) == \"mtllib\")\n\t\t\t\t{\n\t\t\t\t\t// Generate LoadedMaterial\n\n\t\t\t\t\t// Generate a path to the material file\n\t\t\t\t\tstd::vector<std::string> temp;\n\t\t\t\t\talgorithm::split(Path, temp, \"/\");\n\n\t\t\t\t\tstd::string pathtomat = \"\";\n\n\t\t\t\t\tif (temp.size() != 1)\n\t\t\t\t\t{\n\t\t\t\t\t\tfor (size_t i = 0; i < temp.size() - 1; i++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tpathtomat += temp[i] + \"/\";\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\n\t\t\t\t\tpathtomat += algorithm::tail(curline);\n\n\t\t\t\t\t#ifdef OBJL_CONSOLE_OUTPUT\n\t\t\t\t\tstd::cout << std::endl << \"- find materials in: \" << pathtomat << std::endl;\n\t\t\t\t\t#endif\n\n\t\t\t\t\t// Load Materials\n\t\t\t\t\tLoadMaterials(pathtomat);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t#ifdef OBJL_CONSOLE_OUTPUT\n\t\t\tstd::cout << std::endl;\n\t\t\t#endif\n\n\t\t\t// Deal with last mesh\n\n\t\t\tif (!Indices.empty() && !Vertices.empty())\n\t\t\t{\n\t\t\t\t// Create Mesh\n\t\t\t\ttempMesh = Mesh(Vertices, Indices);\n\t\t\t\ttempMesh.MeshName = meshname;\n\n\t\t\t\t// Insert Mesh\n\t\t\t\tLoadedMeshes.push_back(tempMesh);\n\t\t\t}\n\n\t\t\tfile.close();\n\n\t\t\t// Set Materials for each Mesh\n\t\t\tfor (size_t i = 0; i < MeshMatNames.size(); i++)\n\t\t\t{\n\t\t\t\tstd::string matname = MeshMatNames[i];\n\n\t\t\t\t// Find corresponding material name in loaded materials\n\t\t\t\t// when found copy material variables into mesh material\n\t\t\t\tfor (size_t j = 0; j < LoadedMaterials.size(); j++)\n\t\t\t\t{\n\t\t\t\t\tif (LoadedMaterials[j].name == matname)\n\t\t\t\t\t{\n\t\t\t\t\t\tLoadedMeshes[i].MeshMaterial = LoadedMaterials[j];\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (LoadedMeshes.empty() && LoadedVertices.empty() && LoadedIndices.empty())\n\t\t\t{\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\treturn true;\n\t\t\t}\n\t\t}\n\n\t\t// Loaded Mesh Objects\n\t\tstd::vector<Mesh> LoadedMeshes;\n\t\t// Loaded Vertex Objects\n\t\tstd::vector<Vertex> LoadedVertices;\n\t\t// Loaded Index Positions\n\t\tstd::vector<unsigned int> LoadedIndices;\n\t\t// Loaded Material Objects\n\t\tstd::vector<Material> LoadedMaterials;\n\n\tprivate:\n\t\t// Generate vertices 
from a list of positions, \n\t\t//\ttcoords, normals and a face line\n\t\tvoid GenVerticesFromRawOBJ(std::vector<Vertex>& oVerts,\n\t\t\tconst std::vector<Vector3>& iPositions,\n\t\t\tconst std::vector<Vector2>& iTCoords,\n\t\t\tconst std::vector<Vector3>& iNormals,\n\t\t\tstd::string icurline)\n\t\t{\n\t\t\tstd::vector<std::string> sface, svert;\n\t\t\tVertex vVert;\n\t\t\talgorithm::split(algorithm::tail(icurline), sface, \" \");\n\n\t\t\tbool noNormal = false;\n\n\t\t\t// For every given vertex do this\n\t\t\tfor (int i = 0; i < int(sface.size()); i++)\n\t\t\t{\n\t\t\t\t// See What type the vertex is (0 = unrecognized, so the switch below falls through safely)\n\t\t\t\tint vtype = 0;\n\n\t\t\t\talgorithm::split(sface[i], svert, \"/\");\n\n\t\t\t\t// Check for just position - v1\n\t\t\t\tif (svert.size() == 1)\n\t\t\t\t{\n\t\t\t\t\t// Only position\n\t\t\t\t\tvtype = 1;\n\t\t\t\t}\n\n\t\t\t\t// Check for position & texture - v1/vt1\n\t\t\t\tif (svert.size() == 2)\n\t\t\t\t{\n\t\t\t\t\t// Position & Texture\n\t\t\t\t\tvtype = 2;\n\t\t\t\t}\n\n\t\t\t\t// Check for Position, Texture and Normal - v1/vt1/vn1\n\t\t\t\t// or if Position and Normal - v1//vn1\n\t\t\t\tif (svert.size() == 3)\n\t\t\t\t{\n\t\t\t\t\tif (svert[1] != \"\")\n\t\t\t\t\t{\n\t\t\t\t\t\t// Position, Texture, and Normal\n\t\t\t\t\t\tvtype = 4;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t// Position & Normal\n\t\t\t\t\t\tvtype = 3;\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Calculate and store the vertex\n\t\t\t\tswitch (vtype)\n\t\t\t\t{\n\t\t\t\tcase 1: // P\n\t\t\t\t{\n\t\t\t\t\tvVert.Position = algorithm::getElement(iPositions, svert[0]);\n\t\t\t\t\tvVert.TextureCoordinate = Vector2(0, 0);\n\t\t\t\t\tnoNormal = true;\n\t\t\t\t\toVerts.push_back(vVert);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase 2: // P/T\n\t\t\t\t{\n\t\t\t\t\tvVert.Position = algorithm::getElement(iPositions, svert[0]);\n\t\t\t\t\tvVert.TextureCoordinate = algorithm::getElement(iTCoords, svert[1]);\n\t\t\t\t\tnoNormal = true;\n\t\t\t\t\toVerts.push_back(vVert);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase 3: // P//N\n\t\t\t\t{\n\t\t\t\t\tvVert.Position = algorithm::getElement(iPositions, svert[0]);\n\t\t\t\t\tvVert.TextureCoordinate = Vector2(0, 0);\n\t\t\t\t\tvVert.Normal = algorithm::getElement(iNormals, svert[2]);\n\t\t\t\t\toVerts.push_back(vVert);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tcase 4: // P/T/N\n\t\t\t\t{\n\t\t\t\t\tvVert.Position = algorithm::getElement(iPositions, svert[0]);\n\t\t\t\t\tvVert.TextureCoordinate = algorithm::getElement(iTCoords, svert[1]);\n\t\t\t\t\tvVert.Normal = algorithm::getElement(iNormals, svert[2]);\n\t\t\t\t\toVerts.push_back(vVert);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t{\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// take care of missing normals\n\t\t\t// these may not be truly accurate but it is the \n\t\t\t// best they get for not compiling a mesh with normals\t\n\t\t\tif (noNormal)\n\t\t\t{\n\t\t\t\tVector3 A = oVerts[0].Position - oVerts[1].Position;\n\t\t\t\tVector3 B = oVerts[2].Position - oVerts[1].Position;\n\n\t\t\t\tVector3 normal = math::CrossV3(A, B);\n\n\t\t\t\tfor (int i = 0; i < int(oVerts.size()); i++)\n\t\t\t\t{\n\t\t\t\t\toVerts[i].Normal = normal;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Triangulate a list of vertices into a face by printing\n\t\t//\tindices corresponding with triangles within it\n\t\tvoid VertexTriangluation(std::vector<unsigned int>& oIndices,\n\t\t\tconst std::vector<Vertex>& iVerts)\n\t\t{\n\t\t\t// If there are 2 or less verts,\n\t\t\t// no triangle can be created,\n\t\t\t// so exit\n\t\t\tif (iVerts.size() 
< 3)\n\t\t\t{\n\t\t\t\treturn;\n\t\t\t}\n\t\t\t// If it is a triangle no need to calculate it\n\t\t\tif (iVerts.size() == 3)\n\t\t\t{\n\t\t\t\toIndices.push_back(0);\n\t\t\t\toIndices.push_back(1);\n\t\t\t\toIndices.push_back(2);\n\t\t\t\treturn;\n\t\t\t}\n\n\t\t\t// Create a list of vertices\n\t\t\tstd::vector<Vertex> tVerts = iVerts;\n\n\t\t\twhile (true)\n\t\t\t{\n\t\t\t\t// For every vertex\n\t\t\t\tfor (int i = 0; i < int(tVerts.size()); i++)\n\t\t\t\t{\n\t\t\t\t\t// pPrev = the previous vertex in the list\n\t\t\t\t\tVertex pPrev;\n\t\t\t\t\tif (i == 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tpPrev = tVerts[tVerts.size() - 1];\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tpPrev = tVerts[i - 1];\n\t\t\t\t\t}\n\n\t\t\t\t\t// pCur = the current vertex;\n\t\t\t\t\tVertex pCur = tVerts[i];\n\n\t\t\t\t\t// pNext = the next vertex in the list\n\t\t\t\t\tVertex pNext;\n\t\t\t\t\tif (i == tVerts.size() - 1)\n\t\t\t\t\t{\n\t\t\t\t\t\tpNext = tVerts[0];\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tpNext = tVerts[i + 1];\n\t\t\t\t\t}\n\n\t\t\t\t\t// Check to see if there are only 3 verts left\n\t\t\t\t\t// if so this is the last triangle\n\t\t\t\t\tif (tVerts.size() == 3)\n\t\t\t\t\t{\n\t\t\t\t\t\t// Create a triangle from pCur, pPrev, pNext\n\t\t\t\t\t\tfor (int j = 0; j < int(tVerts.size()); j++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (iVerts[j].Position == pCur.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t\tif (iVerts[j].Position == pPrev.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t\tif (iVerts[j].Position == pNext.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttVerts.clear();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\tif (tVerts.size() == 4)\n\t\t\t\t\t{\n\t\t\t\t\t\t// Create a triangle from pCur, pPrev, pNext\n\t\t\t\t\t\tfor (int j = 0; j < int(iVerts.size()); j++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (iVerts[j].Position == pCur.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t\tif (iVerts[j].Position == pPrev.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t\tif (iVerts[j].Position == pNext.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tVector3 tempVec;\n\t\t\t\t\t\tfor (int j = 0; j < int(tVerts.size()); j++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (tVerts[j].Position != pCur.Position\n\t\t\t\t\t\t\t\t&& tVerts[j].Position != pPrev.Position\n\t\t\t\t\t\t\t\t&& tVerts[j].Position != pNext.Position)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\ttempVec = tVerts[j].Position;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Create a triangle from pCur, pPrev, pNext\n\t\t\t\t\t\tfor (int j = 0; j < int(iVerts.size()); j++)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (iVerts[j].Position == pPrev.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t\tif (iVerts[j].Position == pNext.Position)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t\tif (iVerts[j].Position == tempVec)\n\t\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\ttVerts.clear();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\n\t\t\t\t\t// If Vertex is not an interior vertex (the original '&&' here could never be true)\n\t\t\t\t\tfloat angle = static_cast<float>(math::AngleBetweenV3(pPrev.Position - pCur.Position, pNext.Position - pCur.Position) * (180 / 3.14159265359));\n\t\t\t\t\tif (angle <= 0 || angle >= 180)\n\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\t// If any vertices are within this triangle\n\t\t\t\t\tbool inTri = false;\n\t\t\t\t\tfor (int j = 0; j < int(iVerts.size()); j++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif 
(algorithm::inTriangle(iVerts[j].Position, pPrev.Position, pCur.Position, pNext.Position)\n\t\t\t\t\t\t\t&& iVerts[j].Position != pPrev.Position\n\t\t\t\t\t\t\t&& iVerts[j].Position != pCur.Position\n\t\t\t\t\t\t\t&& iVerts[j].Position != pNext.Position)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tinTri = true;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif (inTri)\n\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\t// Create a triangle from pCur, pPrev, pNext\n\t\t\t\t\tfor (int j = 0; j < int(iVerts.size()); j++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (iVerts[j].Position == pCur.Position)\n\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\tif (iVerts[j].Position == pPrev.Position)\n\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t\tif (iVerts[j].Position == pNext.Position)\n\t\t\t\t\t\t\toIndices.push_back(j);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Delete pCur from the list\n\t\t\t\t\tfor (int j = 0; j < int(tVerts.size()); j++)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (tVerts[j].Position == pCur.Position)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttVerts.erase(tVerts.begin() + j);\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t// reset i to the start\n\t\t\t\t\t// -1 since loop will add 1 to it\n\t\t\t\t\ti = -1;\n\t\t\t\t}\n\n\t\t\t\t// if no triangles were created\n\t\t\t\tif (oIndices.size() == 0)\n\t\t\t\t\tbreak;\n\n\t\t\t\t// if no more vertices\n\t\t\t\tif (tVerts.size() == 0)\n\t\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\t// Load Materials from .mtl file\n\t\tbool LoadMaterials(std::string path)\n\t\t{\n\t\t\t// If the file is not a material file return false\n\t\t\tif (path.substr(path.size() - 4, path.size()) != \".mtl\")\n\t\t\t\treturn false;\n\n\t\t\tstd::ifstream file(path);\n\n\t\t\t// If the file is not found return false\n\t\t\tif (!file.is_open())\n\t\t\t\treturn false;\n\n\t\t\tMaterial tempMaterial;\n\n\t\t\tbool listening = false;\n\n\t\t\t// Go through each line looking for material variables\n\t\t\tstd::string curline;\n\t\t\twhile (std::getline(file, curline))\n\t\t\t{\n\t\t\t\t// new material and material name\n\t\t\t\tif (algorithm::firstToken(curline) == \"newmtl\")\n\t\t\t\t{\n\t\t\t\t\tif (!listening)\n\t\t\t\t\t{\n\t\t\t\t\t\tlistening = true;\n\n\t\t\t\t\t\tif (curline.size() > 7)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttempMaterial.name = algorithm::tail(curline);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttempMaterial.name = \"none\";\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t// Generate the material\n\n\t\t\t\t\t\t// Push Back loaded Material\n\t\t\t\t\t\tLoadedMaterials.push_back(tempMaterial);\n\n\t\t\t\t\t\t// Clear Loaded Material\n\t\t\t\t\t\ttempMaterial = Material();\n\n\t\t\t\t\t\tif (curline.size() > 7)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttempMaterial.name = algorithm::tail(curline);\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\ttempMaterial.name = \"none\";\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Ambient Color\n\t\t\t\tif (algorithm::firstToken(curline) == \"Ka\")\n\t\t\t\t{\n\t\t\t\t\tstd::vector<std::string> temp;\n\t\t\t\t\talgorithm::split(algorithm::tail(curline), temp, \" \");\n\n\t\t\t\t\tif (temp.size() != 3)\n\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\ttempMaterial.Ka.X = std::stof(temp[0]);\n\t\t\t\t\ttempMaterial.Ka.Y = std::stof(temp[1]);\n\t\t\t\t\ttempMaterial.Ka.Z = std::stof(temp[2]);\n\t\t\t\t}\n\t\t\t\t// Diffuse Color\n\t\t\t\tif (algorithm::firstToken(curline) == \"Kd\")\n\t\t\t\t{\n\t\t\t\t\tstd::vector<std::string> temp;\n\t\t\t\t\talgorithm::split(algorithm::tail(curline), temp, \" \");\n\n\t\t\t\t\tif (temp.size() != 
3)\n\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\ttempMaterial.Kd.X = std::stof(temp[0]);\n\t\t\t\t\ttempMaterial.Kd.Y = std::stof(temp[1]);\n\t\t\t\t\ttempMaterial.Kd.Z = std::stof(temp[2]);\n\t\t\t\t}\n\t\t\t\t// Specular Color\n\t\t\t\tif (algorithm::firstToken(curline) == \"Ks\")\n\t\t\t\t{\n\t\t\t\t\tstd::vector<std::string> temp;\n\t\t\t\t\talgorithm::split(algorithm::tail(curline), temp, \" \");\n\n\t\t\t\t\tif (temp.size() != 3)\n\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\ttempMaterial.Ks.X = std::stof(temp[0]);\n\t\t\t\t\ttempMaterial.Ks.Y = std::stof(temp[1]);\n\t\t\t\t\ttempMaterial.Ks.Z = std::stof(temp[2]);\n\t\t\t\t}\n\t\t\t\t// Specular Exponent\n\t\t\t\tif (algorithm::firstToken(curline) == \"Ns\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.Ns = std::stof(algorithm::tail(curline));\n\t\t\t\t}\n\t\t\t\t// Optical Density\n\t\t\t\tif (algorithm::firstToken(curline) == \"Ni\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.Ni = std::stof(algorithm::tail(curline));\n\t\t\t\t}\n\t\t\t\t// Dissolve\n\t\t\t\tif (algorithm::firstToken(curline) == \"d\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.d = std::stof(algorithm::tail(curline));\n\t\t\t\t}\n\t\t\t\t// Illumination\n\t\t\t\tif (algorithm::firstToken(curline) == \"illum\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.illum = std::stoi(algorithm::tail(curline));\n\t\t\t\t}\n\t\t\t\t// Ambient Texture Map\n\t\t\t\tif (algorithm::firstToken(curline) == \"map_Ka\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.map_Ka = algorithm::tail(curline);\n\t\t\t\t}\n\t\t\t\t// Diffuse Texture Map\n\t\t\t\tif (algorithm::firstToken(curline) == \"map_Kd\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.map_Kd = algorithm::tail(curline);\n\t\t\t\t}\n\t\t\t\t// Specular Texture Map\n\t\t\t\tif (algorithm::firstToken(curline) == \"map_Ks\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.map_Ks = algorithm::tail(curline);\n\t\t\t\t}\n\t\t\t\t// Specular Hightlight Map\n\t\t\t\tif (algorithm::firstToken(curline) == \"map_Ns\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.map_Ns = algorithm::tail(curline);\n\t\t\t\t}\n\t\t\t\t// Alpha Texture Map\n\t\t\t\tif (algorithm::firstToken(curline) == \"map_d\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.map_d = algorithm::tail(curline);\n\t\t\t\t}\n\t\t\t\t// Bump Map\n\t\t\t\tif (algorithm::firstToken(curline) == \"map_Bump\" || algorithm::firstToken(curline) == \"map_bump\" || algorithm::firstToken(curline) == \"bump\")\n\t\t\t\t{\n\t\t\t\t\ttempMaterial.map_bump = algorithm::tail(curline);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Deal with last material\n\n\t\t\t// Push Back loaded Material\n\t\t\tLoadedMaterials.push_back(tempMaterial);\n\n\t\t\t// Test to see if anything was loaded\n\t\t\t// If not return false\n\t\t\tif (LoadedMaterials.empty())\n\t\t\t\treturn false;\n\t\t\t// If so return true\n\t\t\telse\n\t\t\t\treturn true;\n\t\t}\n\t};\n}" }, { "alpha_fraction": 0.6860499382019043, "alphanum_fraction": 0.6969162821769714, "avg_line_length": 30.52777862548828, "blob_id": "713b426c42d2662868df96000eec19dff3b113e1", "content_id": "9aa10799e058d0a747edc15defe6d48b5dfd46d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3405, "license_type": "permissive", "max_line_length": 122, "num_lines": 108, "path": "/Sources/Core/Graphics/SceneGraph/Mesh.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n 
*\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Mesh.hpp\"\n\n#include <glew.h>\n\n#include \"../../Helpers/Debug.hpp\"\n\nCore::Graphics::SceneGraph::Mesh::Mesh(Texture2D texture, std::vector<float>&& elements, unsigned count_elements) noexcept\n : m_texture(texture), m_elements(elements), m_count_elements(count_elements)\n{\n generateIdentifiers();\n bindDataIdentifiers();\n fillBuffersData();\n unbindDataIdentifiers();\n}\n\nvoid Core::Graphics::SceneGraph::Mesh::draw() const noexcept\n{\n m_texture.bind();\n glBindVertexArray(m_vao);\n glDrawArrays(GL_TRIANGLES, 0, m_count_elements);\n glBindVertexArray(NULL);\n m_texture.unbind();\n}\n\nvoid Core::Graphics::SceneGraph::Mesh::free() noexcept\n{\n#ifdef _DEBUG\n unsigned int result = 0u;\n#endif // _DEBUG\n\n glDeleteBuffers(1, &m_vbo);\n#ifdef _DEBUG\n LOG_WARNING_IF((result = glGetError()) == GL_INVALID_VALUE, \"VBO data were not deleted.\");\n#endif // _DEBUG\n\n glDeleteVertexArrays(1, &m_vao);\n#ifdef _DEBUG\n LOG_WARNING_IF((result = glGetError()) == GL_INVALID_VALUE, \"VAO data were not deleted.\");\n#endif // _DEBUG\n\n m_texture.free();\n m_elements.shrink_to_fit();\n\n m_vbo = 0u;\n m_vao = 0u;\n m_count_elements = 0u;\n}\n\nvoid Core::Graphics::SceneGraph::Mesh::generateIdentifiers()\n{\n glGenBuffers(1, &m_vbo);\n glGenVertexArrays(1, &m_vao);\n\n#ifdef _DEBUG\n LOG_WARNING_IF(m_vao == 0, \"ID for vertex array objects was not generated.\");\n LOG_WARNING_IF(m_vbo == 0, \"ID for vertex buffer object was not generated.\");\n#endif // _DEBUG\n}\n\nvoid Core::Graphics::SceneGraph::Mesh::bindDataIdentifiers()\n{\n glBindVertexArray(m_vao);\n glBindBuffer(GL_ARRAY_BUFFER, m_vbo);\n}\n\nvoid Core::Graphics::SceneGraph::Mesh::unbindDataIdentifiers()\n{\n glBindVertexArray(NULL);\n glBindBuffer(GL_ARRAY_BUFFER, NULL);\n}\n\nvoid Core::Graphics::SceneGraph::Mesh::fillBuffersData()\n{\n constexpr std::uint8_t ALIGNMENT_VERTEX = 0u;\n constexpr std::uint8_t ALIGNMENT_TEXTURE_COORDINATE = 3u;\n constexpr std::uint8_t ALIGNMENT_NORMAL = 5u;\n constexpr std::uint8_t SIZE_ELEMENT = ALIGNMENT_NORMAL + 3u;\n\n glBufferData(GL_ARRAY_BUFFER, static_cast<ptrdiff_t>(m_count_elements * SIZE_ELEMENT * sizeof(float)), \n m_elements.data(), GL_STATIC_DRAW);\n\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE,\n SIZE_ELEMENT * sizeof(float), reinterpret_cast<void*>(ALIGNMENT_VERTEX * sizeof(float)));\n glEnableVertexAttribArray(0);\n glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE,\n SIZE_ELEMENT * sizeof(float), reinterpret_cast<void*>(ALIGNMENT_TEXTURE_COORDINATE * sizeof(float)));\n glEnableVertexAttribArray(1);\n glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE,\n SIZE_ELEMENT * sizeof(float), reinterpret_cast<void*>(ALIGNMENT_NORMAL * sizeof(float)));\n glEnableVertexAttribArray(2);\n}\n" }, { "alpha_fraction": 0.6815920472145081, "alphanum_fraction": 0.6940298676490784, "avg_line_length": 30.736841201782227, "blob_id": "c41672a1690326fdb60dfd387acadd994d733f8b", "content_id": "99755df3cdaf9491e77453496cf4203ceb40cafa", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1206, "license_type": "permissive", 
"max_line_length": 114, "num_lines": 38, "path": "/Sources/Core/Resources/Loaders/ImageLoader.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"ImageLoader.hpp\"\n\n#include <SOIL.h>\n\n#include \"../Image.hpp\"\n\nbool Core::Resources::Loaders::ImageLoader::load(Image& image, std::string_view image_path) noexcept\n{\n int image_width = 0;\n int image_height = 0;\n unsigned char* image_data = SOIL_load_image(image_path.data(), &image_width, &image_height, 0, SOIL_LOAD_RGB);\n if (image_data)\n {\n image.setData(image_data);\n image.setWidth(static_cast<std::uint16_t>(image_width));\n image.setHeight(static_cast<std::uint16_t>(image_height));\n return true;\n }\n\n return false;\n}\n" }, { "alpha_fraction": 0.5785896182060242, "alphanum_fraction": 0.5824128985404968, "avg_line_length": 33.115943908691406, "blob_id": "75c636f62f5ebe069460787c81c85a693413eeb2", "content_id": "7610bbeeac1465f5116216377f20633bbabad59c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2354, "license_type": "permissive", "max_line_length": 98, "num_lines": 69, "path": "/Documentation/COMMITS.md", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "# Commits style convention\n\nOur style is to create commits almost fully complies with the \n<a href=\"https://www.conventionalcommits.org/en/v1.0.0-beta.3/\">The Conventional Commits.</a>\nBut there are also some specific changes, so please read this \ndocument to the end.\n\n## Commit structure\n\nThe commit message should be structured as follows:\n\n---\n\n <type>(optional scope): <description>\n\n <optional body>\n\n <optional footer>\n\n---\n\n* In the body of a commit we write what has been changed and why.\n\n* We use the following types of commits:\n\n| Commit type | Description | \n|:-----------:|:----------------------------------------------------------:|\n| build | build of the project or change external dependencies |\n| ci | configuring of CI and working with scripts |\n| docs | documentation updating |\n| feat | adding new functionality |\n| fix | bug fix |\n| perf | changes to improve performance |\n| refactor | edit code without correcting errors or adding new features |\n| revert | rollback to previous commits |\n| style | code style edits(tabs, indents, full stops, commas, etc.) |\n| test | adding tests |\n\n* We write the description in the imperative mood, just like Git itself.\n\n\n Merge branch 'fix/SECRETMRKT-749-fix-typos-in-titles'\n \n* The description consists only of lowercase letters and we do not download the description of \nthe commit with punctuation marks. 
\n\n## Commit short examples\n\n* Commit message with description and breaking change in body\n\n feat: allow provided config object to extend other configs\n\n BREAKING CHANGE: `extends` key in config file is now used for extending other config files\n\n* Commit message with no body\n\n docs: correct spelling of CHANGELOG\n\n* Commit message with scope\n\n feat(language): added russian language\n \n* Commit message for a fix using an (optional) issue number.\n\n fix: minor typos in code\n\n see the issue for details on the typos fixed\n\n fixes issue #12\n" }, { "alpha_fraction": 0.7047204375267029, "alphanum_fraction": 0.7161031365394592, "avg_line_length": 28.8700008392334, "blob_id": "505d5dffc8716a959ea21e2094bea7f171923812", "content_id": "ea1f9f0080781fdd88fa88f2bd11b67fd5efdd80", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2987, "license_type": "permissive", "max_line_length": 117, "num_lines": 100, "path": "/Sources/Core/Graphics/SceneGraph/Node.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Node.hpp\"\n\n#include <glm/gtc/matrix_transform.hpp>\n\nvoid Core::Graphics::SceneGraph::Node::addChild(NodeSPtr node) noexcept\n{\n m_children.push_back(node);\n}\n\nvoid Core::Graphics::SceneGraph::Node::removeChild(NodeSPtr node) noexcept\n{\n const auto it = std::find(begin(m_children), end(m_children), node);\n if (it != end(m_children))\n {\n m_children.erase(it);\n }\n}\n\nvoid Core::Graphics::SceneGraph::Node::rotateByX(float degrees) noexcept\n{\n m_transformation = rotate(m_transformation, glm::radians(degrees), glm::vec3(1.0f, 0.0f, 0.0f));\n}\n\nvoid Core::Graphics::SceneGraph::Node::rotateByY(float degrees) noexcept\n{\n m_transformation = rotate(m_transformation, glm::radians(degrees), glm::vec3(0.0f, 1.0f, 0.0f));\n}\n\nvoid Core::Graphics::SceneGraph::Node::rotateByZ(float degrees) noexcept\n{\n m_transformation = rotate(m_transformation, glm::radians(degrees), glm::vec3(0.0f, 0.0f, 1.0f));\n}\n\nvoid Core::Graphics::SceneGraph::Node::scale(float value) noexcept\n{\n m_transformation = glm::scale(m_transformation, { value, value, value });\n}\n\nvoid Core::Graphics::SceneGraph::Node::move(const glm::vec3& position) noexcept\n{\n m_transformation = translate(m_transformation, position);\n}\n\nvoid Core::Graphics::SceneGraph::Node::move(const glm::vec4& position) noexcept\n{\n m_transformation = translate(m_transformation, glm::vec3{ position.x, position.y, position.z });\n}\n\nbool Core::Graphics::SceneGraph::Node::isExistChildren() const noexcept\n{\n return !m_children.empty();\n}\n\nbool Core::Graphics::SceneGraph::Node::isExitChild(NodeSPtr node) const noexcept\n{\n const auto it = std::find(begin(m_children), end(m_children), node);\n return it != end(m_children);\n}\n\nvoid 
Core::Graphics::SceneGraph::Node::setMesh(const Mesh* mesh) noexcept\n{\n    m_mesh = mesh;\n}\n\nconst Core::Graphics::SceneGraph::Mesh* Core::Graphics::SceneGraph::Node::getMesh() const noexcept\n{\n    return m_mesh;\n}\n\nstd::deque<Core::Graphics::SceneGraph::NodeSPtr>::iterator Core::Graphics::SceneGraph::Node::childrenBegin() noexcept\n{\n    return begin(m_children);\n}\n\nstd::deque<Core::Graphics::SceneGraph::NodeSPtr>::iterator Core::Graphics::SceneGraph::Node::childrenEnd() noexcept\n{\n    return end(m_children);\n}\n\nconst glm::mat4x4& Core::Graphics::SceneGraph::Node::getTransformation() const noexcept\n{\n    return m_transformation;\n}\n" }, { "alpha_fraction": 0.6925466060638428, "alphanum_fraction": 0.7018633484840393, "avg_line_length": 29.66666603088379, "blob_id": "96bbd8585dee4002d2c4eda362dda87031505e9e", "content_id": "5fb3f5516d89467fd1730cc1dea1409f17ca9389", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2576, "license_type": "permissive", "max_line_length": 95, "num_lines": 84, "path": "/Sources/Core/GUI/Window.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n\n#define GLEW_STATIC\n\n#include \"Window.hpp\"\n\n#include <glew.h>\n#include <GLFW/glfw3.h>\n\n#include \"../Helpers/Debug.hpp\"\n#include \"../Input/InputEventHandler.hpp\"\n\nCore::GUI::Window::Window(const int width, const int height, const std::string& title) noexcept\n{\n    const int was_initialized = glfwInit();\n    LOG_ERROR_IF(!was_initialized, \"GLFW library was not initialized.\");\n    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);\n    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);\n    m_window = glfwCreateWindow(width, height, title.c_str(), nullptr, nullptr);\n    LOG_ERROR_IF(!m_window, \"Window was not created.\");\n}\n\nvoid Core::GUI::Window::show() noexcept\n{\n    initGLContext();\n    initEventHandlers();\n\n    while (!glfwWindowShouldClose(m_window))\n    {\n        int window_width = 0;\n        int window_height = 0;\n        // query the size first; the viewport was previously set from the zero-initialized values\n        glfwGetWindowSize(m_window, &window_width, &window_height);\n        glViewport(0, 0, window_width, window_height);\n        glClearColor(0.3f, 0.7f, 0.9f, 1.0f);\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\n        glfwSwapBuffers(m_window);\n        glfwPollEvents();\n    }\n}\n\nvoid Core::GUI::Window::initGLContext() const noexcept\n{\n    glfwMakeContextCurrent(m_window);\n    glewExperimental = GL_TRUE;\n    glfwSwapInterval(1);\n\n    const unsigned int initialization_result = glewInit();\n    LOG_ERROR_IF(initialization_result != GLEW_OK, \"GLEW library was not initialized.\");\n\n    glEnable(GL_BLEND);\n    glEnable(GL_DEPTH_TEST);\n    glEnable(GL_MULTISAMPLE);\n    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n}\n\nvoid Core::GUI::Window::initEventHandlers() const noexcept\n{\n    glfwSetErrorCallback(Input::onInputError);\n    glfwSetKeyCallback(m_window, Input::onKeyboardEvent);\n    
glfwSetCursorPosCallback(m_window, Input::onMouseMoveEvent);\n glfwSetMouseButtonCallback(m_window, Input::onMouseClickEvent);\n}\n\nCore::GUI::Window::~Window()\n{\n glfwDestroyWindow(m_window);\n glfwTerminate();\n}\n" }, { "alpha_fraction": 0.6171389222145081, "alphanum_fraction": 0.6496198773384094, "avg_line_length": 29.787233352661133, "blob_id": "4ef1e397eb1d0e0971ead1181668c96691ae4a24", "content_id": "cc38ff3d987a5f44490c6d4ae2e50f7b7b51a055", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1447, "license_type": "permissive", "max_line_length": 75, "num_lines": 47, "path": "/Sources/Core/Graphics/Camera.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <glm/vec3.hpp>\n#include <glm/mat4x4.hpp>\n\nnamespace Core::Graphics\n{\n\n class Camera\n {\n public:\n void moveLeft() noexcept;\n void moveRight() noexcept;\n void moveForward() noexcept;\n void moveBackward() noexcept;\n void setSpeed(float speed) noexcept;\n void turn(int x_offset, int y_offset) noexcept;\n const glm::vec3& getPosition() const noexcept;\n glm::mat4x4 getViewMatrix() const noexcept;\n glm::mat4x4 getProjectionMatrix() const noexcept;\n private:\n float m_speed = 0.0f;\n float m_fov = 45.0f;\n float m_yaw_angle = -90.0f;\n float m_pitch_angle = 0.0f;\n glm::vec3 m_position{ 0.0f, 0.0f, 3.0f };\n glm::vec3 m_up_direction{ 0.0f, 1.0f, 0.0f };\n glm::vec3 m_forward_direction{ 0.0f, 0.0f, -1.0f };\n };\n\n}\n" }, { "alpha_fraction": 0.6982175707817078, "alphanum_fraction": 0.7043638825416565, "avg_line_length": 33.61701965332031, "blob_id": "899b8309e381ed6c393d312c23205e1a510f9f11", "content_id": "a4fe1ff9c4c94d357f9ed98dd82a4f7a87fdd296", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1627, "license_type": "permissive", "max_line_length": 91, "num_lines": 47, "path": "/Sources/Core/Resources/Material.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n\n#include \"IResource.hpp\"\n#include \"../Helpers/Holders/Polymorphic.hpp\"\n\nnamespace Core::Resources\n{\n\n class Material final : public IResource, public Helpers::Holders::Polymorphic<Material>\n 
{\n public:\n void setShininess(float shininess) noexcept;\n void setAmbientTextureName(std::string&& texture_name) noexcept;\n void setDiffuseTextureName(std::string&& texture_name) noexcept;\n void setSpecularTextureName(std::string&& texture_name) noexcept;\n float getShininess() const noexcept;\n std::string_view getAmbientTextureName() const noexcept;\n std::string_view getDiffuseTextureName() const noexcept;\n std::string_view getSpecularTextureName() const noexcept;\n private:\n bool load(std::string_view material_path) noexcept override;\n private:\n float m_shininess = 0.0f;\n std::string m_ambient_texture_name{};\n std::string m_diffuse_texture_name{};\n std::string m_specular_texture_name{};\n };\n\n}\n" }, { "alpha_fraction": 0.6451779007911682, "alphanum_fraction": 0.6465532183647156, "avg_line_length": 37.779998779296875, "blob_id": "5e41762700a454dac492ca0efd90a6f626a25ff2", "content_id": "f437416810998d16b600ee4243e1b3b4d57bd8fd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5817, "license_type": "permissive", "max_line_length": 113, "num_lines": 150, "path": "/Sources/Core/Managers/ResourceManager.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <optional>\n#include <boost/property_tree/ptree.hpp>\n\n#include \"IManager.hpp\"\n#include \"../ManagersFWD.hpp\"\n#include \"../Resources.hpp\"\n#include \"../Resources/IResource.hpp\"\n#include \"../Resources/ResourceTypes.hpp\"\n#include \"../Helpers/Debug.hpp\"\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_resource_manager\n #define g_resource_manager Core::Managers::ResourceManager::getInstance()\n#endif // g_resource_manager\n\nnamespace Core::Managers\n{\n\n class ResourceManager : public IManager<ResourceManager>, public Helpers::Holders::Singleton<ResourceManager>\n {\n using resources_map_t = std::unordered_map<std::string, Resources::IResourceSPtr>;\n public:\n void initialize();\n template<typename T>\n std::shared_ptr<T> getResource(const std::string& resource_id) const noexcept;\n template<typename T>\n std::optional<T> loadResource(const std::string& resource_path) const;\n template<typename T>\n void loadResource(const std::string& resource_id, std::shared_ptr<T> resource) noexcept;\n template<typename T>\n bool isExistsResource(const std::string& resource_id) noexcept;\n private:\n template<typename T>\n void loadSection(const boost::property_tree::ptree& section) noexcept;\n template<typename T>\n void loadResource(const std::string& resource_id, std::string_view resource_path) noexcept;\n private:\n std::array<resources_map_t, TO_SIZE_T(Resources::ResourceType::COUNT_TYPES)> m_resources{};\n };\n\n template<typename T>\n std::shared_ptr<T> ResourceManager::getResource(const std::string& resource_id) const noexcept\n {\n constexpr Resources::ResourceType resource_type 
= Resources::getResourceType<T>();\n if (resource_type != Resources::ResourceType::UNKNOWN)\n {\n const auto resource_type_index = TO_SIZE_T(resource_type);\n const resources_map_t& resources_for_current_type = m_resources.at(resource_type_index);\n const auto it = resources_for_current_type.find(resource_id);\n if (it != end(resources_for_current_type))\n {\n return std::dynamic_pointer_cast<T>(it->second);\n }\n }\n\n return nullptr;\n }\n\n template<typename T>\n std::optional<T> ResourceManager::loadResource(const std::string& resource_path) const\n {\n std::optional<T> resource = T{};\n const bool successful_result = resource->load(resource_path);\n if (!successful_result)\n {\n LOG_WARNING(\"Resource {'\" + STR(resource_path) + \"'} was not loaded.\");\n return std::nullopt;\n }\n\n return resource;\n }\n\n template<typename T>\n void ResourceManager::loadResource(const std::string& resource_id, std::shared_ptr<T> resource) noexcept\n {\n constexpr Resources::ResourceType resource_type = Resources::getResourceType<T>();\n if (resource_type != Resources::ResourceType::UNKNOWN)\n {\n const auto resource_type_index = TO_SIZE_T(resource_type);\n resources_map_t& resources_for_current_type = m_resources.at(resource_type_index);\n resources_for_current_type.emplace(resource_id, resource);\n }\n }\n\n template<typename T>\n bool ResourceManager::isExistsResource(const std::string& resource_id) noexcept\n {\n constexpr Resources::ResourceType resource_type = Resources::getResourceType<T>();\n if (resource_type != Resources::ResourceType::UNKNOWN)\n {\n const auto resource_type_index = TO_SIZE_T(resource_type);\n resources_map_t& resources_for_current_type = m_resources.at(resource_type_index);\n const auto it = resources_for_current_type.find(resource_id);\n return it != end(resources_for_current_type);\n }\n\n return false;\n }\n\n template<typename T>\n void ResourceManager::loadSection(const boost::property_tree::ptree& section) noexcept\n {\n const std::string& resources_path = STR(g_configuration_manager.getResourcesPath());\n for (const auto& data : section)\n {\n const std::string resource_id = data.first;\n const std::string resource_path = resources_path + data.second.get_value(\"\");\n loadResource<T>(resource_id, resource_path);\n }\n }\n\n template<typename T>\n void ResourceManager::loadResource(const std::string& resource_id, std::string_view resource_path) noexcept\n {\n constexpr Resources::ResourceType resource_type = Resources::getResourceType<T>();\n if (resource_type != Resources::ResourceType::UNKNOWN)\n {\n const auto resource_type_index = TO_SIZE_T(resource_type);\n resources_map_t& resources_for_current_type = m_resources.at(resource_type_index);\n auto resource = std::make_shared<T>();\n if (resource->load(resource_path))\n {\n resources_for_current_type.emplace(resource_id, resource);\n }\n else\n {\n LOG_WARNING(\"Resource {'\" + resource_id + \"' : '\" + STR(resource_path) + \"'} was not loaded.\");\n }\n }\n }\n\n}\n" }, { "alpha_fraction": 0.6743232011795044, "alphanum_fraction": 0.690730094909668, "avg_line_length": 32.078947067260742, "blob_id": "2b5f362bf37731bf92603618c9416abc661be81b", "content_id": "5e65b68d4c07fbecd971a781d89592540638d257", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1219, "license_type": "permissive", "max_line_length": 114, "num_lines": 38, "path": "/Sources/Core/Graphics/SceneGraph/Texture2D.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", 
"text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\nnamespace Core::Graphics::SceneGraph\n{\n\n class Texture2D\n {\n public:\n Texture2D() noexcept = default;\n Texture2D(std::uint16_t width, std::uint16_t height, const unsigned char* data) noexcept;\n void bind() const noexcept;\n void unbind() const noexcept;\n void free() noexcept;\n private:\n void generateTextureIdentifier() noexcept;\n void setTextureParameters() const noexcept;\n void fillTextureData(std::uint16_t width, std::uint16_t height, const unsigned char* data) const noexcept;\n private:\n unsigned int m_texture_id = 0u;\n };\n\n}\n" }, { "alpha_fraction": 0.693341851234436, "alphanum_fraction": 0.7003841400146484, "avg_line_length": 30.87755012512207, "blob_id": "34fc051ce24f784aafc5b9da323478ca650d0d77", "content_id": "c10f944b96f3ae916e412fe9ef905e33fe6af840", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1562, "license_type": "permissive", "max_line_length": 104, "num_lines": 49, "path": "/Sources/Core/Managers/SoundManager.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <audiere.h>\n#include <boost/compute/detail/lru_cache.hpp>\n\n#include \"IManager.hpp\"\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_sound_manager\n #define g_sound_manager Core::Managers::SoundManager::getInstance()\n#endif // g_sound_manager\n\nnamespace Core::Managers\n{\n\n class SoundManager : public IManager<SoundManager>, public Helpers::Holders::Singleton<SoundManager>\n {\n static const float SOUND_VOLUME;\n static const float MUSIC_VOLUME;\n using audio_cache_t = boost::compute::detail::lru_cache<std::string, audiere::OutputStreamPtr>;\n public:\n void initialize();\n void playSound(const std::string& key);\n void playMusic(const std::string& key);\n const audiere::AudioDevicePtr& getAudioDevice() const noexcept;\n private:\n audiere::AudioDevicePtr m_device = nullptr;\n audio_cache_t m_music = 5;\n audio_cache_t m_sounds = 15;\n };\n\n}\n" }, { "alpha_fraction": 0.590308666229248, "alphanum_fraction": 0.6052439212799072, "avg_line_length": 63.65665054321289, "blob_id": "a1cbaee50e0ca87227d11397de7f44046a6d245c", "content_id": "b2f72faf6942bfdc20e39b5d87a7ea163385751a", "detected_licenses": [ 
"Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 15065, "license_type": "permissive", "max_line_length": 227, "num_lines": 233, "path": "/Dependencies/LibXL/Include/ISheetT.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_ISHEETT_H\n#define LIBXL_ISHEETT_H\n\n#include \"setup.h\"\n#include \"enum.h\"\n\nnamespace libxl\n{\n\n template<class TCHAR> struct IFormatT;\n template<class TCHAR> struct IAutoFilterT;\n\n template<class TCHAR>\n struct ISheetT\n {\n virtual CellType XLAPIENTRY cellType(int row, int col) const = 0;\n virtual bool XLAPIENTRY isFormula(int row, int col) const = 0;\n\n virtual IFormatT<TCHAR>* XLAPIENTRY cellFormat(int row, int col) const = 0;\n virtual void XLAPIENTRY setCellFormat(int row, int col, IFormatT<TCHAR>* format) = 0;\n\n virtual const TCHAR* XLAPIENTRY readStr(int row, int col, IFormatT<TCHAR>** format = 0) = 0;\n virtual bool XLAPIENTRY writeStr(int row, int col, const TCHAR* value, IFormatT<TCHAR>* format = 0, CellType type = CELLTYPE_STRING) = 0;\n\n virtual double XLAPIENTRY readNum(int row, int col, IFormatT<TCHAR>** format = 0) const = 0;\n virtual bool XLAPIENTRY writeNum(int row, int col, double value, IFormatT<TCHAR>* format = 0) = 0;\n\n virtual bool XLAPIENTRY readBool(int row, int col, IFormatT<TCHAR>** format = 0) const = 0;\n virtual bool XLAPIENTRY writeBool(int row, int col, bool value, IFormatT<TCHAR>* format = 0, int errCode = ERRORTYPE_NOERROR) = 0;\n\n virtual bool XLAPIENTRY readBlank(int row, int col, IFormatT<TCHAR>** format) const = 0;\n virtual bool XLAPIENTRY writeBlank(int row, int col, IFormatT<TCHAR>* format) = 0;\n\n virtual const TCHAR* XLAPIENTRY readFormula(int row, int col, IFormatT<TCHAR>** format = 0) = 0;\n virtual bool XLAPIENTRY writeFormula(int row, int col, const TCHAR* expr, IFormatT<TCHAR>* format = 0) = 0;\n\n virtual bool XLAPIENTRY writeFormulaNum(int row, int col, const TCHAR* expr, double value, IFormatT<TCHAR>* format = 0) = 0;\n virtual bool XLAPIENTRY writeFormulaStr(int row, int col, const TCHAR* expr, const TCHAR* value, IFormatT<TCHAR>* format = 0) = 0;\n virtual bool XLAPIENTRY writeFormulaBool(int row, int col, const TCHAR* expr, bool value, IFormatT<TCHAR>* format = 0) = 0;\n\n virtual const TCHAR* XLAPIENTRY readComment(int row, int col) const = 0;\n virtual void XLAPIENTRY writeComment(int row, int col, const TCHAR* value, const TCHAR* author = 0, int width = 129, int height = 75) = 0;\n virtual void XLAPIENTRY removeComment(int row, int col) = 0;\n\n virtual bool XLAPIENTRY isDate(int row, int col) const = 0;\n\n virtual ErrorType XLAPIENTRY readError(int row, int col) const = 0;\n virtual void XLAPIENTRY writeError(int row, int col, ErrorType error, IFormatT<TCHAR>* format = 0) = 0;\n\n virtual double XLAPIENTRY colWidth(int col) const = 0;\n virtual double XLAPIENTRY rowHeight(int row) const = 0;\n\n virtual bool XLAPIENTRY setCol(int colFirst, int colLast, double width, IFormatT<TCHAR>* format = 0, bool hidden = false) = 0;\n virtual bool XLAPIENTRY setRow(int row, double height, IFormatT<TCHAR>* format = 0, bool hidden = false) = 0;\n\n virtual bool XLAPIENTRY rowHidden(int row) const = 0;\n virtual bool XLAPIENTRY setRowHidden(int row, bool hidden) = 0;\n\n virtual bool XLAPIENTRY colHidden(int col) const = 0;\n virtual bool XLAPIENTRY setColHidden(int col, bool hidden) = 0;\n\n virtual bool XLAPIENTRY getMerge(int row, int col, int* rowFirst = 0, int* rowLast = 0, int* colFirst = 0, int* colLast = 0) = 
0;\n virtual bool XLAPIENTRY setMerge(int rowFirst, int rowLast, int colFirst, int colLast) = 0;\n virtual bool XLAPIENTRY delMerge(int row, int col) = 0;\n\n virtual int XLAPIENTRY mergeSize() const = 0;\n virtual bool XLAPIENTRY merge(int index, int* rowFirst, int* rowLast, int* colFirst, int* colLast) = 0;\n virtual bool XLAPIENTRY delMergeByIndex(int index) = 0;\n\n virtual int XLAPIENTRY pictureSize() const = 0;\n virtual int XLAPIENTRY getPicture(int index, int* rowTop = 0, int* colLeft = 0, int* rowBottom = 0, int* colRight = 0,\n int* width = 0, int* height = 0, int* offset_x = 0, int* offset_y = 0, const TCHAR** linkPath = 0) const = 0;\n\n virtual void XLAPIENTRY setPicture(int row, int col, int pictureId, double scale = 1.0, int offset_x = 0, int offset_y = 0, Position pos = POSITION_MOVE_AND_SIZE) = 0;\n virtual void XLAPIENTRY setPicture2(int row, int col, int pictureId, int width = -1, int height = -1, int offset_x = 0, int offset_y = 0, Position pos = POSITION_MOVE_AND_SIZE) = 0;\n\n virtual int XLAPIENTRY getHorPageBreak(int index) const = 0;\n virtual int XLAPIENTRY getHorPageBreakSize() const = 0;\n\n virtual int XLAPIENTRY getVerPageBreak(int index) const = 0;\n virtual int XLAPIENTRY getVerPageBreakSize() const = 0;\n\n virtual bool XLAPIENTRY setHorPageBreak(int row, bool pageBreak = true) = 0;\n virtual bool XLAPIENTRY setVerPageBreak(int col, bool pageBreak = true) = 0;\n\n virtual void XLAPIENTRY split(int row, int col) = 0;\n virtual bool XLAPIENTRY splitInfo(int* row, int* col) const = 0;\n\n virtual bool XLAPIENTRY groupRows(int rowFirst, int rowLast, bool collapsed = true) = 0;\n virtual bool XLAPIENTRY groupCols(int colFirst, int colLast, bool collapsed = true) = 0;\n\n virtual bool XLAPIENTRY groupSummaryBelow() const = 0;\n virtual void XLAPIENTRY setGroupSummaryBelow(bool below) = 0;\n\n virtual bool XLAPIENTRY groupSummaryRight() const = 0;\n virtual void XLAPIENTRY setGroupSummaryRight(bool right) = 0;\n\n virtual bool XLAPIENTRY clear(int rowFirst = 0, int rowLast = 1048575, int colFirst = 0, int colLast = 16383) = 0;\n\n virtual bool XLAPIENTRY insertCol(int colFirst, int colLast, bool updateNamedRanges = true) = 0;\n virtual bool XLAPIENTRY insertRow(int rowFirst, int rowLast, bool updateNamedRanges = true) = 0;\n virtual bool XLAPIENTRY removeCol(int colFirst, int colLast, bool updateNamedRanges = true) = 0;\n virtual bool XLAPIENTRY removeRow(int rowFirst, int rowLast, bool updateNamedRanges = true) = 0;\n\n virtual bool XLAPIENTRY copyCell(int rowSrc, int colSrc, int rowDst, int colDst) = 0;\n\n virtual int XLAPIENTRY firstRow() const = 0;\n virtual int XLAPIENTRY lastRow() const = 0;\n virtual int XLAPIENTRY firstCol() const = 0;\n virtual int XLAPIENTRY lastCol() const = 0;\n\n virtual bool XLAPIENTRY displayGridlines() const = 0;\n virtual void XLAPIENTRY setDisplayGridlines(bool show = true) = 0;\n\n virtual bool XLAPIENTRY printGridlines() const = 0;\n virtual void XLAPIENTRY setPrintGridlines(bool print = true) = 0;\n\n virtual int XLAPIENTRY zoom() const = 0;\n virtual void XLAPIENTRY setZoom(int zoom) = 0;\n\n virtual int XLAPIENTRY printZoom() const = 0;\n virtual void XLAPIENTRY setPrintZoom(int zoom) = 0;\n\n virtual bool XLAPIENTRY getPrintFit(int* wPages, int* hPages) const = 0;\n virtual void XLAPIENTRY setPrintFit(int wPages = 1, int hPages = 1) = 0;\n\n virtual bool XLAPIENTRY landscape() const = 0;\n virtual void XLAPIENTRY setLandscape(bool landscape = true) = 0;\n\n virtual Paper XLAPIENTRY paper() const = 0;\n virtual void 
XLAPIENTRY setPaper(Paper paper = PAPER_DEFAULT) = 0;\n\n virtual const TCHAR* XLAPIENTRY header() const = 0;\n virtual bool XLAPIENTRY setHeader(const TCHAR* header, double margin = 0.5) = 0;\n virtual double XLAPIENTRY headerMargin() const = 0;\n\n virtual const TCHAR* XLAPIENTRY footer() const = 0;\n virtual bool XLAPIENTRY setFooter(const TCHAR* footer, double margin = 0.5) = 0;\n virtual double XLAPIENTRY footerMargin() const = 0;\n\n virtual bool XLAPIENTRY hCenter() const = 0;\n virtual void XLAPIENTRY setHCenter(bool hCenter = true) = 0;\n\n virtual bool XLAPIENTRY vCenter() const = 0;\n virtual void XLAPIENTRY setVCenter(bool vCenter = true) = 0;\n\n virtual double XLAPIENTRY marginLeft() const = 0;\n virtual void XLAPIENTRY setMarginLeft(double margin) = 0;\n\n virtual double XLAPIENTRY marginRight() const = 0;\n virtual void XLAPIENTRY setMarginRight(double margin) = 0;\n\n virtual double XLAPIENTRY marginTop() const = 0;\n virtual void XLAPIENTRY setMarginTop(double margin) = 0;\n\n virtual double XLAPIENTRY marginBottom() const = 0;\n virtual void XLAPIENTRY setMarginBottom(double margin) = 0;\n\n virtual bool XLAPIENTRY printRowCol() const = 0;\n virtual void XLAPIENTRY setPrintRowCol(bool print = true) = 0;\n\n virtual bool XLAPIENTRY printRepeatRows(int* rowFirst, int* rowLast) = 0;\n virtual void XLAPIENTRY setPrintRepeatRows(int rowFirst, int rowLast) = 0;\n\n virtual bool XLAPIENTRY printRepeatCols(int* colFirst, int* colLast) = 0;\n virtual void XLAPIENTRY setPrintRepeatCols(int colFirst, int colLast) = 0;\n\n virtual bool XLAPIENTRY printArea(int* rowFirst, int* rowLast, int* colFirst, int* colLast) = 0;\n virtual void XLAPIENTRY setPrintArea(int rowFirst, int rowLast, int colFirst, int colLast) = 0;\n\n virtual void XLAPIENTRY clearPrintRepeats() = 0;\n virtual void XLAPIENTRY clearPrintArea() = 0;\n\n virtual bool XLAPIENTRY getNamedRange(const TCHAR* name, int* rowFirst, int* rowLast, int* colFirst, int* colLast, int scopeId = SCOPE_UNDEFINED, bool* hidden = 0) = 0;\n virtual bool XLAPIENTRY setNamedRange(const TCHAR* name, int rowFirst, int rowLast, int colFirst, int colLast, int scopeId = SCOPE_UNDEFINED, bool hidden = false) = 0;\n virtual bool XLAPIENTRY delNamedRange(const TCHAR* name, int scopeId = SCOPE_UNDEFINED) = 0;\n\n virtual int XLAPIENTRY namedRangeSize() const = 0;\n virtual const TCHAR* XLAPIENTRY namedRange(int index, int* rowFirst, int* rowLast, int* colFirst, int* colLast, int* scopeId = 0, bool* hidden = 0) = 0;\n\n virtual int XLAPIENTRY tableSize() const = 0;\n virtual const TCHAR* XLAPIENTRY table(int index, int* rowFirst, int* rowLast, int* colFirst, int* colLast, int* headerRowCount, int* totalsRowCount) = 0;\n\n virtual int XLAPIENTRY hyperlinkSize() const = 0;\n virtual const TCHAR* XLAPIENTRY hyperlink(int index, int* rowFirst, int* rowLast, int* colFirst, int* colLast) = 0;\n virtual bool XLAPIENTRY delHyperlink(int index) = 0;\n virtual void XLAPIENTRY addHyperlink(const TCHAR* hyperlink, int rowFirst, int rowLast, int colFirst, int colLast) = 0;\n\n virtual IAutoFilterT<TCHAR>* XLAPIENTRY autoFilter() = 0;\n virtual void XLAPIENTRY applyFilter() = 0;\n virtual void XLAPIENTRY removeFilter() = 0;\n\n virtual const TCHAR* XLAPIENTRY name() const = 0;\n virtual void XLAPIENTRY setName(const TCHAR* name) = 0;\n\n virtual bool XLAPIENTRY protect() const = 0;\n virtual void XLAPIENTRY setProtect(bool protect = true, const TCHAR* password = 0, EnhancedProtection prot = PROT_DEFAULT) = 0;\n\n virtual SheetState XLAPIENTRY hidden() const = 0;\n 
virtual bool XLAPIENTRY setHidden(SheetState state = SHEETSTATE_HIDDEN) = 0;\n\n virtual void XLAPIENTRY getTopLeftView(int* row, int* col) const = 0;\n virtual void XLAPIENTRY setTopLeftView(int row, int col) = 0;\n\n virtual bool XLAPIENTRY rightToLeft() const = 0;\n virtual void XLAPIENTRY setRightToLeft(bool rightToLeft = true) = 0;\n\n virtual void XLAPIENTRY setAutoFitArea(int rowFirst = 0, int colFirst = 0, int rowLast = -1, int colLast = -1) = 0;\n\n virtual void XLAPIENTRY addrToRowCol(const TCHAR* addr, int* row, int* col, bool* rowRelative = 0, bool* colRelative = 0) = 0;\n virtual const TCHAR* XLAPIENTRY rowColToAddr(int row, int col, bool rowRelative = true, bool colRelative = true) = 0;\n\n virtual void XLAPIENTRY setTabColor(Color color) = 0;\n virtual void XLAPIENTRY setTabColor(int red, int green, int blue) = 0;\n\n virtual bool XLAPIENTRY addIgnoredError(int rowFirst, int colFirst, int rowLast, int colLast, IgnoredError iError) = 0;\n\n virtual void XLAPIENTRY addDataValidation(DataValidationType type, DataValidationOperator op, int rowFirst, int rowLast, int colFirst, int colLast, const TCHAR* value1, const TCHAR* value2 = 0,\n bool allowBlank = true, bool hideDropDown = false, bool showInputMessage = true, bool showErrorMessage = true, const TCHAR* promptTitle = 0, const TCHAR* prompt = 0,\n const TCHAR* errorTitle = 0, const TCHAR* error = 0, DataValidationErrorStyle errorStyle = VALIDATION_ERRSTYLE_STOP) = 0;\n\n virtual void XLAPIENTRY addDataValidationDouble(DataValidationType type, DataValidationOperator op, int rowFirst, int rowLast, int colFirst, int colLast, double value1, double value2,\n bool allowBlank = true, bool hideDropDown = false, bool showInputMessage = true, bool showErrorMessage = true, const TCHAR* promptTitle = 0, const TCHAR* prompt = 0,\n const TCHAR* errorTitle = 0, const TCHAR* error = 0, DataValidationErrorStyle errorStyle = VALIDATION_ERRSTYLE_STOP) = 0;\n\n virtual void XLAPIENTRY removeDataValidations() = 0;\n\n virtual ~ISheetT() {}\n };\n\n}\n\n#endif\n" }, { "alpha_fraction": 0.7243674993515015, "alphanum_fraction": 0.7563248872756958, "avg_line_length": 37.512821197509766, "blob_id": "98eb7923cccaed39440cec429e09ea9dfb3a87a8", "content_id": "906f5dd1e5c5d2e3adf0fe519a3461b9f29d2696", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1502, "license_type": "permissive", "max_line_length": 265, "num_lines": 39, "path": "/README.md", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n  <img alt=\"RacingWorld\" src=\"./RacingWorld.png\" width=\"400\"/>\n</p>\n\n[![Codacy Badge](https://api.codacy.com/project/badge/Grade/a65b5bf4f3464678b76024d4874a44f5)](https://www.codacy.com/app/VladimirBalun/RacingWorld?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=VladimirBalun/RacingWorld&amp;utm_campaign=Badge_Grade)\n[![Build status](https://ci.appveyor.com/api/projects/status/4mc5eid85976lg4n?svg=true)](https://ci.appveyor.com/project/VladimirBalun/racingworld)\n\n# Introduction\n\nRacingWorld is a 3D multiplayer online game about racing. At the moment the game\nis under development; the full description of the game will be added later. 
The current \nversion of the game runs only on Windows, but ports to other platforms are planned.\n\n## Minimum system requirements\n\n CPU: Pentium III or AMD K6\n RAM: 100MB\n OpenGL: Version 3.3\n OS:\t Windows 7, Windows 8, Windows 10\n Store:\t 500MB available space\n Network: Broadband Internet connection\n\n## Installation\n\nYou can read the full installation guide <a href=\"./Documentation/INSTALLATION.md\">here</a>.\n\n## Contributing\n\nLooking to contribute something to RacingWorld? Please read through our\n<a href=\"./Documentation/CONTRIBUTING.md\">contributing guidelines</a> in\norder to make the contribution process easy and effective for everyone involved.\n\n## License\n\nRacingWorld is licensed under the <a href=\"./LICENSE\">Apache License</a>.\n\n---\n\nIf you have any questions, please contact: [email protected]\n" }, { "alpha_fraction": 0.555923342704773, "alphanum_fraction": 0.5430171489715576, "avg_line_length": 26.596153259277344, "blob_id": "6dd2b1a759113dfe1e50a04bb93b44fbb234509c", "content_id": "d3bffc9213a17c481d7a2ba44aab54bc800f51b7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5740, "license_type": "permissive", "max_line_length": 120, "num_lines": 208, "path": "/Sources/Core/Helpers/TypeLists.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <cstddef>\n\n#include \"Metafunctions.hpp\"\n\n#define TYPELIST_1(__T1__) Core::Helpers::TypeList<__T1__, Core::Helpers::NullType>\n#define TYPELIST_2(__T1__, __T2__) Core::Helpers::TypeList<__T1__, TYPELIST_1(__T2__) >\n#define TYPELIST_3(__T1__, __T2__, __T3__) TypeList<__T1__, TYPELIST_2(__T2__, __T3__) >\n#define TYPELIST_4(__T1__, __T2__, __T3__, __T4__) TypeList<__T1__, TYPELIST_3(__T2__, __T3__, __T4__) >\n#define TYPELIST_5(__T1__, __T2__, __T3__, __T4__, __T5__) TypeList<__T1__, TYPELIST_4(__T2__, __T3__, __T4__, __T5__) >\n\nnamespace Core::Helpers \n{\n\n template<typename T, typename U>\n struct TypeList\n {\n using Head = T;\n using Tail = U;\n };\n\n using StringsList = TYPELIST_4(char*, signed char*, unsigned char*, std::string);\n using SignedIntegralsList = TYPELIST_4(signed char, short int, int, long int);\n using UnsignedIntegralsList = TYPELIST_4(unsigned char, unsigned short int, unsigned int, unsigned long int);\n\n namespace TL {\n\n // Length \n\n template<class TList>\n struct Length;\n\n template<>\n struct Length<NullType>\n {\n static constexpr size_t value = 0u;\n };\n\n template<typename Head, typename Tail>\n struct Length<TypeList<Head, Tail> >\n {\n static constexpr size_t value = 1u + Length<Tail>::value;\n };\n\n // At \n\n template<class TList, unsigned int index>\n struct At;\n\n template<typename Head, typename Tail>\n struct At<TypeList<Head, Tail>, 0>\n {\n using Result = Head;\n };\n\n template<typename Head, typename Tail, unsigned 
int index>\n struct At<TypeList<Head, Tail>, index>\n {\n using Result = typename At<Tail, index - 1>::Result;\n };\n\n // IndexOf \n\n template<class TList, typename T>\n struct IndexOf;\n\n template<typename T>\n struct IndexOf<NullType, T>\n {\n static constexpr int value = -1;\n };\n\n template<typename T, typename Tail>\n struct IndexOf<TypeList<T, Tail>, T>\n {\n static constexpr int value = 0;\n };\n\n template<typename Head, typename Tail, typename T>\n struct IndexOf<TypeList<Head, Tail>, T>\n {\n static constexpr int value = (IndexOf<Tail, T>::value == -1) ? (-1) : (1 + IndexOf<Tail, T>::value);\n };\n\n // Append \n\n template<class TList, typename T>\n struct Append;\n\n template<typename T>\n struct Append<NullType, T>\n {\n using Result = TYPELIST_1(T);\n };\n\n template<typename Head, typename Tail>\n struct Append<NullType, TypeList<Head, Tail> >\n {\n using Result = TypeList<Head, Tail>;\n };\n\n template<typename Head, typename Tail, typename T>\n struct Append<TypeList<Head, Tail>, T>\n {\n using Result = TypeList<Head, typename Append<Tail, T>::Result>;\n };\n\n // Erase \n\n template<class TList, typename T>\n struct Erase;\n\n template<typename T>\n struct Erase<NullType, T>\n {\n using Result = NullType;\n };\n\n template<typename T, typename Tail>\n struct Erase<TypeList<T, Tail>, T>\n {\n using Result = Tail;\n };\n\n template<typename Head, typename Tail, typename T>\n struct Erase<TypeList<Head, Tail>, T>\n {\n using Result = TypeList<Head, typename Erase<Tail, T>::Result>;\n };\n\n // EraseAll \n\n template<class TList, typename T>\n struct EraseAll;\n\n template<typename T>\n struct EraseAll<NullType, T>\n {\n using Result = NullType;\n };\n\n template<typename T, typename Tail>\n struct EraseAll<TypeList<T, Tail>, T>\n {\n using Result = typename EraseAll<Tail, T>::Result;\n };\n\n template<typename Head, typename Tail, typename T>\n struct EraseAll<TypeList<Head, Tail>, T>\n {\n using Result = TypeList<Head, typename EraseAll<Tail, T>::Result>;\n };\n\n // NoDuplicates \n\n template<class TList>\n struct NoDuplicates;\n\n template<>\n struct NoDuplicates<NullType>\n {\n using Result = NullType;\n };\n\n template<typename Head, typename Tail>\n struct NoDuplicates<TypeList<Head, Tail> >\n {\n using Result = TypeList<Head, typename EraseAll<typename NoDuplicates<Tail>::Result, Head>::Result>;\n };\n\n // Replace \n\n template<class TList, typename T, typename U>\n struct Replace;\n\n template<typename T, typename U>\n struct Replace<NullType, T, U>\n {\n using Result = NullType;\n };\n\n template<typename T, typename Tail, typename U>\n struct Replace<TypeList<T, Tail>, T, U>\n {\n using Result = TypeList<U, Tail>;\n };\n\n template<typename Head, typename Tail, typename T, typename U>\n struct Replace<TypeList<Head, Tail>, T, U>\n {\n using Result = TypeList<Head, typename Replace<Tail, T, U>::Result>;\n };\n\n }\n\n}\n" }, { "alpha_fraction": 0.6523496508598328, "alphanum_fraction": 0.6581606864929199, "avg_line_length": 37.05769348144531, "blob_id": "0faedae9c29650bc33faa714b8a2639c9c26234f", "content_id": "d85549abd6ab11bf127da737b3220c86636f0168", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3958, "license_type": "permissive", "max_line_length": 129, "num_lines": 104, "path": "/Sources/Core/Resources/Loaders/ModelLoader.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the 
License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"ModelLoader.hpp\"\n\n#include <OBJLoader.hpp>\n\n#include \"ImageLoader.hpp\"\n#include \"../Model.hpp\"\n#include \"../Image.hpp\"\n#include \"../Material.hpp\"\n#include \"../../Managers/ResourceManager.hpp\"\n\n#define UNPACK_OBJ1_VEC2(__vector__) \\\n (__vector__).X, (__vector__).Y\n\n#define UNPACK_OBJ1_VEC3(__vector__) \\\n (__vector__).X, (__vector__).Y, (__vector__).Z\n\nbool Core::Resources::Loaders::OBJLoader::load(Model& model, std::string_view model_file_path) noexcept\n{\n objl::Loader obj_loader{};\n const bool was_loaded = obj_loader.LoadFile(model_file_path.data());\n if (was_loaded)\n {\n const std::string model_path = STR(model_file_path.substr(0u, model_file_path.find_last_of(\"\\\\/\") + 1));\n for (const auto& imported_object : obj_loader.LoadedMeshes)\n {\n Model::Object object(imported_object.Vertices.size(), imported_object.Indices.size());\n for (const auto& imported_vertex : imported_object.Vertices)\n {\n glm::vec3 position{ UNPACK_OBJ1_VEC3(imported_vertex.Position) };\n glm::vec3 normal{ UNPACK_OBJ1_VEC3(imported_vertex.Normal) };\n glm::vec2 texture_coordinate{ UNPACK_OBJ1_VEC2(imported_vertex.TextureCoordinate) };\n object.emplaceVertex(normal, position, texture_coordinate);\n }\n for (const auto index : imported_object.Indices)\n {\n object.addIndex(index);\n }\n\n const objl::Material& material = imported_object.MeshMaterial;\n if (!material.name.empty())\n {\n object.setMaterialName(material.name);\n loadMaterial(material, model_path);\n }\n\n object.setName(imported_object.MeshName);\n model.addObject(std::move(object));\n }\n\n return true;\n }\n\n return false;\n}\n\nvoid Core::Resources::Loaders::OBJLoader::loadMaterial(const objl::Material& material, const std::string& material_path) noexcept\n{\n std::string ambient_texture_name = material.map_Ka;\n std::string diffuse_texture_name = material.map_Kd;\n std::string specular_texture_name = material.map_Ks;\n\n tryLoadImage(ambient_texture_name, material_path);\n tryLoadImage(diffuse_texture_name, material_path);\n tryLoadImage(specular_texture_name, material_path);\n\n if (!g_resource_manager.isExistsResource<Material>(material.name))\n {\n auto converted_material = std::make_shared<Material>();\n converted_material->setAmbientTextureName(std::move(ambient_texture_name));\n converted_material->setDiffuseTextureName(std::move(diffuse_texture_name));\n converted_material->setSpecularTextureName(std::move(specular_texture_name));\n g_resource_manager.loadResource<Material>(material.name, converted_material);\n }\n}\n\nvoid Core::Resources::Loaders::OBJLoader::tryLoadImage(const std::string& image_filename, const std::string& image_path) noexcept\n{\n if (!g_resource_manager.isExistsResource<Image>(image_filename))\n {\n auto image = std::make_shared<Image>();\n const std::string full_filename_path = image_path + image_filename;\n if (ImageLoader::load(*image.get(), full_filename_path))\n {\n g_resource_manager.loadResource<Image>(image_filename, image);\n }\n }\n}\n" }, { "alpha_fraction": 0.7244582176208496, 
"alphanum_fraction": 0.7327141165733337, "avg_line_length": 29.28125, "blob_id": "1a79a9567fd436e36cb5b47f29319a6251073737", "content_id": "633735609c46adb9c531eace54b39f44cafc6f91", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 969, "license_type": "permissive", "max_line_length": 75, "num_lines": 32, "path": "/Sources/Core/InputFWD.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\ntypedef struct GLFWwindow GLFWwindow;\n\nnamespace Core::Input \n{\n\n class MouseState;\n class KeyboardState;\n\n void onInputError(int, const char*) noexcept;\n void onMouseMoveEvent(GLFWwindow*, double, double) noexcept;\n void onMouseClickEvent(GLFWwindow*, int, int, int) noexcept;\n void onKeyboardEvent(GLFWwindow*, int, int, int, int) noexcept;\n\n}\n" }, { "alpha_fraction": 0.6805555820465088, "alphanum_fraction": 0.6890096664428711, "avg_line_length": 29.10909080505371, "blob_id": "19d2e5e5ff63a427e8a064c3d64227b901f6fb66", "content_id": "b00cd8194ee96bec985a9214d4be0f1d213de42b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1656, "license_type": "permissive", "max_line_length": 102, "num_lines": 55, "path": "/Sources/Core/Graphics/SceneGraph/Builders.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include \"../../ResourcesFWD.hpp\"\n#include \"../../Resources/Model.hpp\"\n\nnamespace Core::Graphics::SceneGraph\n{\n\n class Node;\n class Mesh;\n class Scene;\n class Texture2D;\n\n class NodeBuilder\n {\n public:\n static std::shared_ptr<Node> build(Resources::ModelSPtr model, Scene& scene);\n private:\n static std::shared_ptr<Node> createNode(const Resources::Model::Object& object, Scene& scene);\n };\n\n class MeshBuilder\n {\n public:\n static const Mesh* build(const Resources::Model::Object& object, Scene& scene);\n private:\n static unsigned int getCountElements(std::size_t count_vertices) noexcept;\n static unsigned int getCountElements(std::vector<float>& elements) noexcept;\n static void addVec3ToElements(std::vector<float>& elements, const glm::vec3& vector);\n static void addVec2ToElements(std::vector<float>& elements, const glm::vec2& vector);\n 
};\n\n class TextureBuilder\n {\n public:\n static Texture2D build(Resources::ImageSPtr image);\n };\n\n}\n" }, { "alpha_fraction": 0.49347180128097534, "alphanum_fraction": 0.501483678817749, "avg_line_length": 31.718446731567383, "blob_id": "3e70a42f66cf6fa0ff7f19c377d97164dedc3a7d", "content_id": "a99c43025dd33520946cbdd5a7742271be62860a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3370, "license_type": "permissive", "max_line_length": 124, "num_lines": 103, "path": "/Sources/Core/Helpers/Debug.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#ifdef _DEBUG\n\n #include <string>\n\n #define LOG_DEBUG(__text__) \\\n printf(\"[DEBUG] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str())\n\n #define LOG_INFO(__text__) \\\n printf(\"[INFO] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str())\n\n #define LOG_WARNING(__text__) \\\n printf(\"[WARNING] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str())\n \n #define LOG_ERROR(__text__) \\\n printf(\"[ERROR] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str())\n\n #define LOG_PROFILING(__text__) \\\n printf(\"[PROFILING] [%s] - %s\\n\", __TIMESTAMP__, std::string(__text__).c_str())\n\n #define LOG_DEBUG_IF(__condition__, __text__) \\\n do { \\\n if ((__condition__)) \\\n printf(\"[DEBUG] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str()); \\\n } while(0, 0) \n\n #define LOG_INFO_IF(__condition__, __text__) \\\n do { \\\n if ((__condition__)) \\\n printf(\"[INFO] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str()); \\\n } while(0, 0) \n\n #define LOG_WARNING_IF(__condition__, __text__) \\\n do { \\\n if ((__condition__)) \\\n printf(\"[WARNING] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str()); \\\n } while(0, 0) \n\n #define LOG_ERROR_IF(__condition__, __text__) \\\n do { \\\n if ((__condition__)) \\\n printf(\"[ERROR] [%s] [%s:%d] - %s\\n\", __TIMESTAMP__, __FILE__, __LINE__, std::string(__text__).c_str()); \\\n } while(0, 0) \n\n #define ASSERT(__condition__, __message__) \\\n assert((__condition__) && std::string(__message__).c_str())\n\n #define STATIC_ASSERT(__condition__, __message__) \\\n static_assert((__condition__), std::string(__message__).c_str())\n\n#else // _DEBUG\n\n #define LOG_DEBUG(__text__) \\\n ( (void)0 )\n\n #define LOG_INFO(__text__) \\\n ( (void)0 )\n\n #define LOG_WARNING(__text__) \\\n ( (void)0 )\n\n #define LOG_ERROR(__text__) \\\n ( (void)0 )\n\n #define LOG_PROFILING(__text__) \\\n ( (void)0 )\n\n #define LOG_DEBUG_IF(__condition__, __text__) \\\n ( (void)0 )\n\n #define LOG_INFO_IF(__condition__, __text__) \\\n ( (void)0 )\n\n 
#define LOG_WARNING_IF(__condition__, __text__) \\\n ( (void)0 )\n\n #define LOG_ERROR_IF(__condition__, __text__) \\\n ( (void)0 )\n\n #define ASSERT(__condition__, __message__) \\\n ( (void)0 )\n\n #define STATIC_ASSERT(__condition__, __message__) \\\n ( (void)0 )\n\n#endif // ! _DEBUG\n" }, { "alpha_fraction": 0.7189292311668396, "alphanum_fraction": 0.723135769367218, "avg_line_length": 23.22222137451172, "blob_id": "0c9caf306cefdb10360e4f54652aa625434fb061", "content_id": "e67a471c585437cb0d2e3b41f62447fe16dafa47", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2615, "license_type": "permissive", "max_line_length": 111, "num_lines": 108, "path": "/Sources/Core/Resources/Model.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Model.hpp\"\n\n#include \"Loaders/ModelLoader.hpp\"\n\n#pragma region Vertex\n\nconst glm::vec3& Core::Resources::Model::Vertex::getNormal() const noexcept\n{\n return m_normal;\n}\n\nconst glm::vec3& Core::Resources::Model::Vertex::getPosition() const noexcept\n{\n return m_position;\n}\n\nconst glm::vec2& Core::Resources::Model::Vertex::getTextureCoordinate() const noexcept\n{\n return m_texture_coordinate;\n}\n\n#pragma endregion\n\n#pragma region Object\n\nCore::Resources::Model::Object::Object(std::size_t count_vertices, std::size_t count_indices) noexcept\n{\n m_indices.reserve(count_indices);\n m_vertices.reserve(count_vertices);\n}\n\nvoid Core::Resources::Model::Object::addIndex(unsigned int index) noexcept\n{\n m_indices.push_back(index);\n}\n\nvoid Core::Resources::Model::Object::setName(const std::string & name) noexcept\n{\n m_name = name;\n}\n\nvoid Core::Resources::Model::Object::setMaterialName(const std::string& name) noexcept\n{\n m_material_name = name;\n}\n\nconst std::string & Core::Resources::Model::Object::getName() const noexcept\n{\n return m_name;\n}\n\nconst std::string& Core::Resources::Model::Object::getMaterialName() const noexcept\n{\n return m_material_name;\n}\n\nconst std::vector<Core::Resources::Model::Vertex>& Core::Resources::Model::Object::getVertices() const noexcept\n{\n return m_vertices;\n}\n\nconst std::vector<unsigned int>& Core::Resources::Model::Object::getIndices() const noexcept\n{\n return m_indices;\n}\n\n#pragma endregion\n\n#pragma region Model\n\nCore::Resources::Model::Model(std::size_t count_objects) noexcept\n{\n m_objects.reserve(count_objects);\n}\n\nvoid Core::Resources::Model::addObject(Object&& object)\n{\n m_objects.push_back(std::move(object));\n}\n\nconst std::vector<Core::Resources::Model::Object>& Core::Resources::Model::getObjects() const noexcept\n{\n return m_objects;\n}\n\nbool Core::Resources::Model::load(std::string_view model_path) noexcept\n{\n return Loaders::OBJLoader::load(*this, model_path);\n}\n\n#pragma endregion" }, { "alpha_fraction": 
0.6834319233894348, "alphanum_fraction": 0.6868131756782532, "avg_line_length": 25.584270477294922, "blob_id": "4fecf068c64aa7fd6176709ec5926ee6f274c417", "content_id": "b5305c8b596bbcc90d6672f3784a45e0ddc69249", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2366, "license_type": "permissive", "max_line_length": 129, "num_lines": 89, "path": "/Sources/Core/Graphics/SceneGraph/Scene.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Scene.hpp\"\n\n#include \"Mesh.hpp\"\n#include \"Node.hpp\"\n#include \"../Shader.hpp\"\n\nvoid Core::Graphics::SceneGraph::Scene::addMesh(const std::string& shader_id, Mesh&& mesh)\n{\n m_meshes.emplace(shader_id, std::move(mesh));\n}\n\nvoid Core::Graphics::SceneGraph::Scene::addShader(const std::string& shader_id, Shader&& shader)\n{\n m_shaders.emplace(shader_id, std::move(shader));\n}\n\nvoid Core::Graphics::SceneGraph::Scene::setLight(Light && scene_light) noexcept\n{\n m_scene_light = scene_light;\n}\n\nCore::Graphics::SceneGraph::NodeSPtr Core::Graphics::SceneGraph::Scene::getRootNode() const noexcept\n{\n return m_root_node;\n}\n\nvoid Core::Graphics::SceneGraph::Scene::setRootNode(NodeSPtr root_node) noexcept\n{\n m_root_node = root_node;\n}\n\nbool Core::Graphics::SceneGraph::Scene::isExistsMesh(const std::string & mesh_id) const noexcept\n{\n const auto it = m_meshes.find(mesh_id);\n return it != end(m_meshes);\n}\n\nconst Core::Graphics::SceneGraph::Light& Core::Graphics::SceneGraph::Scene::getLight() const noexcept\n{\n return m_scene_light;\n}\n\nconst Core::Graphics::SceneGraph::Mesh* Core::Graphics::SceneGraph::Scene::getMeshByID(const std::string& mesh_id) const noexcept\n{\n const auto it = m_meshes.find(mesh_id);\n if (it != end(m_meshes))\n {\n return &it->second;\n }\n\n return nullptr;\n}\n\nconst Core::Graphics::Shader* Core::Graphics::SceneGraph::Scene::getShaderByID(const std::string& shader_id) const noexcept\n{\n const auto it = m_shaders.find(shader_id);\n if (it != end(m_shaders))\n {\n return &it->second;\n }\n\n return nullptr;\n}\n\nCore::Graphics::SceneGraph::Scene::~Scene()\n{\n for (auto& it : m_meshes)\n {\n Mesh& mesh = it.second;\n mesh.free();\n }\n}\n" }, { "alpha_fraction": 0.7046194076538086, "alphanum_fraction": 0.7150292992591858, "avg_line_length": 23.015625, "blob_id": "20405aab2b7cd3a9646420c02d0ed59bf166b08e", "content_id": "ac922c983d073698488b97425af002ee8976165f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1537, "license_type": "permissive", "max_line_length": 75, "num_lines": 64, "path": "/Sources/Core/Resources/Image.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 
(the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Image.hpp\"\n\n#include <boost/filesystem/convenience.hpp>\n#include <SOIL.h>\n\n#include \"Helpers/Debug.hpp\"\n#include \"Loaders/ImageLoader.hpp\"\n\nvoid Core::Resources::Image::setWidth(std::uint16_t width) noexcept\n{\n m_width = width;\n}\n\nvoid Core::Resources::Image::setHeight(std::uint16_t height) noexcept\n{\n m_height = height;\n}\n\nvoid Core::Resources::Image::setData(unsigned char* data) noexcept\n{\n m_data = data;\n}\n\nstd::uint16_t Core::Resources::Image::getWidth() const noexcept\n{\n return m_width;\n}\n\nstd::uint16_t Core::Resources::Image::getHeight() const noexcept\n{\n return m_height;\n}\n\nconst unsigned char* Core::Resources::Image::getData() const noexcept\n{\n return m_data;\n}\n\nbool Core::Resources::Image::load(std::string_view image_path) noexcept\n{\n return Loaders::ImageLoader::load(*this, image_path);\n}\n\nCore::Resources::Image::~Image()\n{\n SOIL_free_image_data(m_data);\n}\n" }, { "alpha_fraction": 0.6321059465408325, "alphanum_fraction": 0.6359819173812866, "avg_line_length": 29.352941513061523, "blob_id": "46c8bc20dc61e7ea261c283e7a5a879d8ceae25c", "content_id": "daed4f08ca081ced6a9895da54ca56e211a9edd5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3096, "license_type": "permissive", "max_line_length": 101, "num_lines": 102, "path": "/Sources/Core/Managers/SoundManager.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"SoundManager.hpp\"\n\n#include \"ResourceManager.hpp\"\n#include \"../Resources.hpp\"\n#include \"../Helpers/Time.hpp\"\n#include \"../Helpers/Debug.hpp\"\n#include \"../Helpers/Macroses.hpp\"\n\nconst float Core::Managers::SoundManager::SOUND_VOLUME = 1.0f;\nconst float Core::Managers::SoundManager::MUSIC_VOLUME = 0.5f;\n\nvoid Core::Managers::SoundManager::initialize()\n{\n#ifdef _DEBUG\n const auto start_time = Helpers::getCurrentTimeInMilliseconds<double>();\n#endif // _DEBUG\n\n m_device = audiere::OpenDevice();\n\n#ifdef _DEBUG\n const auto end_time = Helpers::getCurrentTimeInMilliseconds<double>();\n const auto loading_time = end_time - start_time;\n LOG_PROFILING(\"'SoundManager' was initialized in \" + TO_STR(loading_time) + \"ms.\");\n#endif // _DEBUG\n}\n\nvoid Core::Managers::SoundManager::playSound(const 
std::string& key)\n{\n if (!m_sounds.contains(key))\n {\n if (const auto sound = g_resource_manager.getResource<Resources::Sound>(key))\n {\n const audiere::OutputStreamPtr output_sound_stream = sound->getAudioStream();\n m_sounds.insert(key, output_sound_stream);\n }\n else\n {\n LOG_WARNING(\"Sound '\" + key + \"' was not played. It's absent in the 'ResourceManager'.\");\n return;\n }\n }\n\n const auto it = m_sounds.get(key);\n if (it.has_value())\n {\n if (const audiere::OutputStreamPtr audio_stream = it.value())\n {\n audio_stream->setVolume(SOUND_VOLUME);\n audio_stream->play();\n }\n }\n}\n\nvoid Core::Managers::SoundManager::playMusic(const std::string& key)\n{\n if (!m_music.contains(key))\n {\n if (const auto sound = g_resource_manager.getResource<Resources::Sound>(key))\n {\n const audiere::OutputStreamPtr output_sound_stream = sound->getAudioStream();\n m_music.insert(key, output_sound_stream);\n }\n else\n {\n LOG_WARNING(\"Music '\" + key + \"' was not played. It's absent in the 'ResourceManager'.\");\n return;\n }\n }\n\n const auto it = m_music.get(key);\n if (it.has_value())\n {\n if (const audiere::OutputStreamPtr audio_stream = it.value())\n {\n audio_stream->setVolume(MUSIC_VOLUME);\n audio_stream->setRepeat(true);\n audio_stream->play();\n }\n }\n}\n\nconst audiere::AudioDevicePtr& Core::Managers::SoundManager::getAudioDevice() const noexcept\n{\n return m_device;\n}\n" }, { "alpha_fraction": 0.6815106868743896, "alphanum_fraction": 0.6860203146934509, "avg_line_length": 35.20408248901367, "blob_id": "4094fb57f77572d37f865d3ae001ede23c88025d", "content_id": "291ebcefcb060856e4d17fe7f3b1b16e076fb82c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1774, "license_type": "permissive", "max_line_length": 107, "num_lines": 49, "path": "/Sources/Core/Helpers/Time.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once \n\n#include <chrono>\n#include <type_traits>\n\nnamespace Core::Helpers \n{\n\n template<typename T>\n T getCurrentTimeInMinutes() noexcept\n {\n static_assert(std::is_arithmetic<T>::value, \"Type of the time must be arithmetic.\");\n const auto current_time = std::chrono::system_clock::now().time_since_epoch();\n return static_cast<T>(std::chrono::duration_cast<std::chrono::minutes>(current_time).count());\n }\n\n template<typename T>\n T getCurrentTimeInSeconds() noexcept\n {\n static_assert(std::is_arithmetic<T>::value, \"Type of the time must be arithmetic.\");\n const auto current_time = std::chrono::system_clock::now().time_since_epoch();\n return static_cast<T>(std::chrono::duration_cast<std::chrono::seconds>(current_time).count());\n }\n\n template<typename T>\n T getCurrentTimeInMilliseconds() noexcept\n {\n static_assert(std::is_arithmetic<T>::value, \"Type of the time must be arithmetic.\");\n const auto current_time = 
std::chrono::system_clock::now().time_since_epoch();\n        return static_cast<T>(std::chrono::duration_cast<std::chrono::milliseconds>(current_time).count());\n    }\n\n}\n" }, { "alpha_fraction": 0.7583444714546204, "alphanum_fraction": 0.7606809139251709, "avg_line_length": 41.79999923706055, "blob_id": "69e6fef12aa5e179c2d83b99776ef421accded10", "content_id": "1b928da5ccdc79a3f449b6e3abacf1453c73936c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2996, "license_type": "permissive", "max_line_length": 309, "num_lines": 70, "path": "/Documentation/CONTRIBUTING.md", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "# Contributing to RacingWorld\n\nFollowing these guidelines helps to communicate that you respect the time of the\ndevelopers managing and developing this open source project. In return, they should\nreciprocate that respect in addressing your issue or assessing patches and features.\n\n## Pull requests\n\nPlease ask first before embarking on any significant pull request, otherwise you\nrisk spending a lot of time working on something that the project's developers\nmight not want to merge into the project.\n\nAdhering to the following process is the best way to get your work included in the project:\n\n1. Fork the project, clone your fork, and configure the remotes:\n\n git clone https://github.com/<your-username>/RacingWorld.git\n cd RacingWorld\n git remote add upstream https://github.com/VladimirBalun/RacingWorld.git\n\n2. If you cloned a while ago, get the latest changes from upstream:\n\n git checkout develop\n git pull --rebase upstream develop\n\n3. Create a new topic branch (off the main project development branch) to contain your feature, change, or fix:\n\n git checkout -b <topic-branch-name>\n\n4. Commit your changes in logical chunks (but before committing, read our\n <a href=\"./COMMITS.md\">commits style guide</a>). Use Git's interactive rebase \n feature to tidy up your commits before making them public.\n\n5. Locally rebase the upstream development branch into your topic branch:\n\n git pull --rebase upstream develop\n\n6. Push your topic branch up to your fork:\n\n git push origin <topic-branch-name>\n\n7. Open a Pull Request with a clear title and description against the develop branch. \n\n## Feature requests\n\nBefore opening a feature request, please take a moment to find out whether your\nidea fits with the scope and aims of the project. It's up to you to make a strong\ncase to convince the project's developers of the merits of this feature. Please\nprovide as much detail and context as possible.\n\n## Bug reports\n\nA bug is a demonstrable problem that is caused by the code in the repository. Good bug reports are extremely helpful, so thanks!\nA good bug report shouldn't leave others needing to chase you up for more information. Please try to be as detailed as possible in your report. What is your environment? What steps will reproduce the issue? What would you expect to be the outcome? All these details will help people to fix any potential bugs.\n\nExample:\n\n> Short and descriptive example bug report title\n>\n> A summary of the issue and the OS environment in which it occurs. If suitable, include the steps required to reproduce the bug.\n>\n> This is the first step\n> This is the second step\n> Further steps, etc.\n>\n> Any other information you want to share that is relevant to the issue being reported. 
This might include the lines of code that you have identified as causing the bug, and potential solutions (and your opinions on their merits).\n\n---\n\nIf you have any questions, please contact: [email protected]\n" }, { "alpha_fraction": 0.68454509973526, "alphanum_fraction": 0.700834333896637, "avg_line_length": 28.611764907836914, "blob_id": "15e36e9ecb4c993ac1191e5d09eaa458f95bb2c9", "content_id": "b07b48d7e03f793989dab1bd6cfca8ad87de9e01", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2517, "license_type": "permissive", "max_line_length": 144, "num_lines": 85, "path": "/Sources/Core/Graphics/SceneGraph/Texture2D.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Texture2D.hpp\"\n\n#include <glew.h>\n\n#include \"../../Helpers/Debug.hpp\"\n\nCore::Graphics::SceneGraph::Texture2D::Texture2D(std::uint16_t width, std::uint16_t height, const unsigned char* data) noexcept\n{\n if (data)\n {\n generateTextureIdentifier();\n bind();\n setTextureParameters();\n fillTextureData(width, height, data);\n unbind();\n }\n else\n {\n LOG_WARNING(\"Incorrect texture data for creating.\");\n }\n}\n\nvoid Core::Graphics::SceneGraph::Texture2D::generateTextureIdentifier() noexcept\n{\n glGenTextures(1, &m_texture_id);\n#ifdef _DEBUG\n LOG_WARNING_IF(m_texture_id == 0, \"ID for texture was not generated.\");\n#endif // _DEBUG\n}\n\nvoid Core::Graphics::SceneGraph::Texture2D::setTextureParameters() const noexcept\n{\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);\n\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);\n}\n\nvoid Core::Graphics::SceneGraph::Texture2D::fillTextureData(std::uint16_t width, std::uint16_t height, const unsigned char* data) const noexcept\n{\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);\n glGenerateMipmap(GL_TEXTURE_2D);\n}\n\nvoid Core::Graphics::SceneGraph::Texture2D::bind() const noexcept\n{\n glBindTexture(GL_TEXTURE_2D, m_texture_id);\n}\n\nvoid Core::Graphics::SceneGraph::Texture2D::unbind() const noexcept\n{\n glBindTexture(GL_TEXTURE_2D, NULL);\n}\n\nvoid Core::Graphics::SceneGraph::Texture2D::free() noexcept\n{\n#ifdef _DEBUG\n unsigned int result = 0u;\n#endif // _DEBUG\n\n glDeleteTextures(1, &m_texture_id);\n#ifdef _DEBUG\n LOG_WARNING_IF((result = glGetError()) == GL_INVALID_VALUE, \"Texture data were not deleted.\");\n#endif // _DEBUG\n\n m_texture_id = 0;\n}\n" }, { "alpha_fraction": 0.6747787594795227, "alphanum_fraction": 0.6797566413879395, "avg_line_length": 31.285715103149414, "blob_id": "dc250d83af89bd46fe9f971daba65f8a097bc8dc", "content_id": "7cf74e263b69e01810e79087d5975754b8cfe910", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1808, "license_type": "permissive", "max_line_length": 78, "num_lines": 56, "path": "/Sources/Core/Input/KeyboardState.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <atomic>\n#include <GLFW/glfw3.h>\n\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_keyboard_state\n #define g_keyboard_state Core::Input::KeyboardState::getInstance()\n#endif // g_keyboard_state\n\nnamespace Core::Input \n{\n\n class KeyboardState : public Helpers::Holders::Singleton<KeyboardState>\n {\n friend void onMouseClickEvent(GLFWwindow*, int, int, int) noexcept;\n friend void onKeyboardEvent(GLFWwindow*, int, int, int, int) noexcept;\n public:\n bool isPressedKeyW() const noexcept;\n bool isPressedKeyS() const noexcept;\n bool isPressedKeyA() const noexcept;\n bool isPressedKeyD() const noexcept;\n private:\n void pressKeyW() noexcept;\n void pressKeyS() noexcept;\n void pressKeyA() noexcept;\n void pressKeyD() noexcept;\n void releaseKeyW() noexcept;\n void releaseKeyS() noexcept;\n void releaseKeyA() noexcept;\n void releaseKeyD() noexcept;\n private:\n std::atomic_bool m_is_pressed_key_w = false;\n std::atomic_bool m_is_pressed_key_s = false;\n std::atomic_bool m_is_pressed_key_a = false;\n std::atomic_bool m_is_pressed_key_d = false;\n };\n\n}\n" }, { "alpha_fraction": 0.659203052520752, "alphanum_fraction": 0.6622390747070312, "avg_line_length": 41.5, "blob_id": "3738a2c1fde46ac9e8efc0f18446b03cb48301d5", "content_id": "a9d9d5d3ef395087b97b48def8a680cff7a66bb9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2635, "license_type": "permissive", "max_line_length": 97, "num_lines": 62, "path": "/Sources/Core/Helpers/Macroses.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n \n#pragma once\n\n#include <memory>\n#include <string>\n\n#define STR(__value__) std::string(__value__)\n#define TO_STR(__value__) std::to_string(__value__)\n#define TO_SIZE_T(__value__) static_cast<std::size_t>(__value__)\n#define TO_INT(__value__) static_cast<int>(__value__)\n#define TO_DOUBLE(__value__) static_cast<double>(__value__)\n#define 
TO_FLOAT(__value__) static_cast<float>(__value__)\n\n#define STRINGIFY_IMPL(__value__) #__value__\n#define STRINGIFY(__value__) STRINGIFY_IMPL(__value__)\n\n#define CONCATENATE_IMPL(__first__, __second__) __first__##__second__\n#define CONCATENATE(__first__, __second__) CONCATENATE_IMPL(__first__, __second__)\n\n#define TEMPLATE_CLASS_WITH_PARAMS(__class_name__, ...) \\\n __class_name__<__VA_ARGS__>\n\n#define DECL_SMART_PTRS(__class_name__) \\\n using CONCATENATE(__class_name__, SPtr) = std::shared_ptr<__class_name__ >; \\\n using CONCATENATE(__class_name__, UPtr) = std::unique_ptr<__class_name__ >; \\\n using CONCATENATE(__class_name__, WPtr) = std::weak_ptr<__class_name__ >;\n\n#define DECL_SMART_PTRS_BY_TYPEDEF(__source_class_name__, __dest_class_name__) \\\n using CONCATENATE(__dest_class_name__, SPtr) = std::shared_ptr<__source_class_name__ >; \\\n using CONCATENATE(__dest_class_name__, UPtr) = std::unique_ptr<__source_class_name__ >; \\\n using CONCATENATE(__dest_class_name__, WPtr) = std::weak_ptr<__source_class_name__ >;\n\n#define FWD_DECL_SMART_PTRS_FOR_CLASS(__class_name__) \\\n class __class_name__; \\\n DECL_SMART_PTRS(__class_name__)\n\n#define FWD_DECL_SMART_PTRS_FOR_STRUCT(__struct_name__) \\\n struct __struct_name__; \\\n DECL_SMART_PTRS(__struct_name__)\n\n#define FWD_DECL_SMART_PTRS_FOR_CLASS_BY_TYPEDEF(__source_class_name__, __dest_class_name__) \\\n class __source_class_name__; \\\n DECL_SMART_PTRS_BY_TYPEDEF(__source_class_name__, __dest_class_name__)\n\n#define FWD_DECL_SMART_PTRS_FOR_STRUCT_BY_TYPEDEF(__source_struct_name__, __dest_struct_name__) \\\n struct __source_struct_name__; \\\n DECL_SMART_PTRS_BY_TYPEDEF(__source_struct_name__, __dest_struct_name__)\n" }, { "alpha_fraction": 0.5172160267829895, "alphanum_fraction": 0.5430171489715576, "avg_line_length": 27.809524536132812, "blob_id": "3a98535a8e02e8867cebdbe082f12c264d0c7364", "content_id": "44775c59d0a1017710a2fa492b7e5906e0f64138", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10891, "license_type": "permissive", "max_line_length": 118, "num_lines": 378, "path": "/Dependencies/SOIL/Include/test_SOIL.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#include <string>\n#include <iostream>\n\n#include <windows.h>\n#include <shellapi.h>\n#include <gl/gl.h>\n#include <gl/glext.h>\n\n#include \"SOIL.h\"\n\nLRESULT CALLBACK WindowProc(HWND, UINT, WPARAM, LPARAM);\nvoid EnableOpenGL(HWND hwnd, HDC*, HGLRC*);\nvoid DisableOpenGL(HWND, HDC, HGLRC);\n\nint WINAPI WinMain(HINSTANCE hInstance,\n HINSTANCE hPrevInstance,\n LPSTR lpCmdLine,\n int nCmdShow)\n{\n WNDCLASSEX wcex;\n HWND hwnd;\n HDC hDC;\n HGLRC hRC;\n MSG msg;\n BOOL bQuit = FALSE;\n float theta = 0.0f;\n\n // register window class\n wcex.cbSize = sizeof(WNDCLASSEX);\n wcex.style = CS_OWNDC;\n wcex.lpfnWndProc = WindowProc;\n wcex.cbClsExtra = 0;\n wcex.cbWndExtra = 0;\n wcex.hInstance = hInstance;\n wcex.hIcon = LoadIcon(NULL, IDI_APPLICATION);\n wcex.hCursor = LoadCursor(NULL, IDC_ARROW);\n wcex.hbrBackground = (HBRUSH)GetStockObject(BLACK_BRUSH);\n wcex.lpszMenuName = NULL;\n wcex.lpszClassName = \"GLSample\";\n wcex.hIconSm = LoadIcon(NULL, IDI_APPLICATION);\n\n\n if (!RegisterClassEx(&wcex))\n return 0;\n\n // create main window\n hwnd = CreateWindowEx(0,\n \"GLSample\",\n \"SOIL Sample\",\n WS_OVERLAPPEDWINDOW,\n CW_USEDEFAULT,\n CW_USEDEFAULT,\n 512,\n 512,\n NULL,\n NULL,\n hInstance,\n NULL);\n\n ShowWindow(hwnd, nCmdShow);\n\n //\tcheck my 
error handling\n /*\n SOIL_load_OGL_texture( \"img_test.png\", SOIL_LOAD_AUTO, SOIL_CREATE_NEW_ID, 0 );\n std::cout << \"'\" << SOIL_last_result() << \"'\" << std::endl;\n */\n\n\n // enable OpenGL for the window\n EnableOpenGL(hwnd, &hDC, &hRC);\n\n glEnable( GL_BLEND );\n //glDisable( GL_BLEND );\n //\tstraight alpha\n glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );\n //\tpremultiplied alpha (remember to do the same in glColor!!)\n //glBlendFunc( GL_ONE, GL_ONE_MINUS_SRC_ALPHA );\n\n //\tdo I want alpha thresholding?\n glEnable( GL_ALPHA_TEST );\n glAlphaFunc( GL_GREATER, 0.5f );\n\n //\tlog what the use is asking us to load\n std::string load_me = lpCmdLine;\n if( load_me.length() > 2 )\n {\n\t\t//load_me = load_me.substr( 1, load_me.length() - 2 );\n\t\tload_me = load_me.substr( 0, load_me.length() - 0 );\n } else\n {\n \t//load_me = \"img_test_uncompressed.dds\";\n \t//load_me = \"img_test_indexed.tga\";\n \t//load_me = \"img_test.dds\";\n \tload_me = \"img_test.png\";\n \t//load_me = \"odd_size.jpg\";\n \t//load_me = \"img_cheryl.jpg\";\n \t//load_me = \"oak_odd.png\";\n \t//load_me = \"field_128_cube.dds\";\n \t//load_me = \"field_128_cube_nomip.dds\";\n \t//load_me = \"field_128_cube_uc.dds\";\n \t//load_me = \"field_128_cube_uc_nomip.dds\";\n \t//load_me = \"Goblin.dds\";\n \t//load_me = \"parquet.dds\";\n \t//load_me = \"stpeters_probe.hdr\";\n \t//load_me = \"VeraMoBI_sdf.png\";\n\n \t//\tfor testing the texture rectangle code\n \t//load_me = \"test_rect.png\";\n }\n\tstd::cout << \"'\" << load_me << \"'\" << std::endl;\n\n\t//\t1st try to load it as a single-image-cubemap\n\t//\t(note, need DDS ordered faces: \"EWUDNS\")\n\tGLuint tex_ID;\n int time_me;\n\n std::cout << \"Attempting to load as a cubemap\" << std::endl;\n time_me = clock();\n\ttex_ID = SOIL_load_OGL_single_cubemap(\n\t\t\tload_me.c_str(),\n\t\t\tSOIL_DDS_CUBEMAP_FACE_ORDER,\n\t\t\tSOIL_LOAD_AUTO,\n\t\t\tSOIL_CREATE_NEW_ID,\n\t\t\tSOIL_FLAG_POWER_OF_TWO\n\t\t\t| SOIL_FLAG_MIPMAPS\n\t\t\t//| SOIL_FLAG_COMPRESS_TO_DXT\n\t\t\t//| SOIL_FLAG_TEXTURE_REPEATS\n\t\t\t//| SOIL_FLAG_INVERT_Y\n\t\t\t| SOIL_FLAG_DDS_LOAD_DIRECT\n\t\t\t);\n\ttime_me = clock() - time_me;\n\tstd::cout << \"the load time was \" << 0.001f * time_me << \" seconds (warning: low resolution timer)\" << std::endl;\n if( tex_ID > 0 )\n {\n \tglEnable( GL_TEXTURE_CUBE_MAP );\n\t\tglEnable( GL_TEXTURE_GEN_S );\n\t\tglEnable( GL_TEXTURE_GEN_T );\n\t\tglEnable( GL_TEXTURE_GEN_R );\n\t\tglTexGeni( GL_S, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );\n\t\tglTexGeni( GL_T, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );\n\t\tglTexGeni( GL_R, GL_TEXTURE_GEN_MODE, GL_REFLECTION_MAP );\n\t\tglBindTexture( GL_TEXTURE_CUBE_MAP, tex_ID );\n\t\t//\treport\n\t\tstd::cout << \"the loaded single cube map ID was \" << tex_ID << std::endl;\n\t\t//std::cout << \"the load time was \" << 0.001f * time_me << \" seconds (warning: low resolution timer)\" << std::endl;\n } else\n {\n \tstd::cout << \"Attempting to load as a HDR texture\" << std::endl;\n\t\ttime_me = clock();\n\t\ttex_ID = SOIL_load_OGL_HDR_texture(\n\t\t\t\tload_me.c_str(),\n\t\t\t\t//SOIL_HDR_RGBE,\n\t\t\t\t//SOIL_HDR_RGBdivA,\n\t\t\t\tSOIL_HDR_RGBdivA2,\n\t\t\t\t0,\n\t\t\t\tSOIL_CREATE_NEW_ID,\n\t\t\t\tSOIL_FLAG_POWER_OF_TWO\n\t\t\t\t| SOIL_FLAG_MIPMAPS\n\t\t\t\t//| SOIL_FLAG_COMPRESS_TO_DXT\n\t\t\t\t);\n\t\ttime_me = clock() - time_me;\n\t\tstd::cout << \"the load time was \" << 0.001f * time_me << \" seconds (warning: low resolution timer)\" << std::endl;\n\n\t\t//\tdid I fail?\n\t\tif( tex_ID < 1 
)\n\t\t{\n\t\t\t//\tloading of the single-image-cubemap failed, try it as a simple texture\n\t\t\tstd::cout << \"Attempting to load as a simple 2D texture\" << std::endl;\n\t\t\t//\tload the texture, if specified\n\t\t\ttime_me = clock();\n\t\t\ttex_ID = SOIL_load_OGL_texture(\n\t\t\t\t\tload_me.c_str(),\n\t\t\t\t\tSOIL_LOAD_AUTO,\n\t\t\t\t\tSOIL_CREATE_NEW_ID,\n\t\t\t\t\tSOIL_FLAG_POWER_OF_TWO\n\t\t\t\t\t| SOIL_FLAG_MIPMAPS\n\t\t\t\t\t//| SOIL_FLAG_MULTIPLY_ALPHA\n\t\t\t\t\t//| SOIL_FLAG_COMPRESS_TO_DXT\n\t\t\t\t\t| SOIL_FLAG_DDS_LOAD_DIRECT\n\t\t\t\t\t//| SOIL_FLAG_NTSC_SAFE_RGB\n\t\t\t\t\t//| SOIL_FLAG_CoCg_Y\n\t\t\t\t\t//| SOIL_FLAG_TEXTURE_RECTANGLE\n\t\t\t\t\t);\n\t\t\ttime_me = clock() - time_me;\n\t\t\tstd::cout << \"the load time was \" << 0.001f * time_me << \" seconds (warning: low resolution timer)\" << std::endl;\n\t\t}\n\n\t\tif( tex_ID > 0 )\n\t\t{\n\t\t\t//\tenable texturing\n\t\t\tglEnable( GL_TEXTURE_2D );\n\t\t\t//glEnable( 0x84F5 );// enables texture rectangle\n\t\t\t// bind an OpenGL texture ID\n\t\t\tglBindTexture( GL_TEXTURE_2D, tex_ID );\n\t\t\t//\treport\n\t\t\tstd::cout << \"the loaded texture ID was \" << tex_ID << std::endl;\n\t\t\t//std::cout << \"the load time was \" << 0.001f * time_me << \" seconds (warning: low resolution timer)\" << std::endl;\n\t\t} else\n\t\t{\n\t\t\t//\tloading of the texture failed...why?\n\t\t\tglDisable( GL_TEXTURE_2D );\n\t\t\tstd::cout << \"Texture loading failed: '\" << SOIL_last_result() << \"'\" << std::endl;\n\t\t}\n }\n\n // program main loop\n const float ref_mag = 0.1f;\n while (!bQuit)\n {\n // check for messages\n if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))\n {\n // handle or dispatch messages\n if (msg.message == WM_QUIT)\n {\n bQuit = TRUE;\n }\n else\n {\n TranslateMessage(&msg);\n DispatchMessage(&msg);\n }\n }\n else\n {\n // OpenGL animation code goes here\n theta = clock() * 0.1;\n\n float tex_u_max = 1.0f;//0.2f;\n float tex_v_max = 1.0f;//0.2f;\n\n glClearColor(0.0f, 0.0f, 0.0f, 0.0f);\n glClear(GL_COLOR_BUFFER_BIT);\n\n glPushMatrix();\n glScalef( 0.8f, 0.8f, 0.8f );\n //glRotatef(-0.314159f*theta, 0.0f, 0.0f, 1.0f);\n\t\t\tglColor4f( 1.0f, 1.0f, 1.0f, 1.0f );\n\t\t\tglNormal3f( 0.0f, 0.0f, 1.0f );\n glBegin(GL_QUADS);\n\t\t\t\tglNormal3f( -ref_mag, -ref_mag, 1.0f );\n glTexCoord2f( 0.0f, tex_v_max );\n glVertex3f( -1.0f, -1.0f, -0.1f );\n\n glNormal3f( ref_mag, -ref_mag, 1.0f );\n glTexCoord2f( tex_u_max, tex_v_max );\n glVertex3f( 1.0f, -1.0f, -0.1f );\n\n glNormal3f( ref_mag, ref_mag, 1.0f );\n glTexCoord2f( tex_u_max, 0.0f );\n glVertex3f( 1.0f, 1.0f, -0.1f );\n\n glNormal3f( -ref_mag, ref_mag, 1.0f );\n glTexCoord2f( 0.0f, 0.0f );\n glVertex3f( -1.0f, 1.0f, -0.1f );\n glEnd();\n glPopMatrix();\n\n\t\t\ttex_u_max = 1.0f;\n tex_v_max = 1.0f;\n glPushMatrix();\n glScalef( 0.8f, 0.8f, 0.8f );\n glRotatef(theta, 0.0f, 0.0f, 1.0f);\n\t\t\tglColor4f( 1.0f, 1.0f, 1.0f, 1.0f );\n\t\t\tglNormal3f( 0.0f, 0.0f, 1.0f );\n glBegin(GL_QUADS);\n glTexCoord2f( 0.0f, tex_v_max );\t\tglVertex3f( 0.0f, 0.0f, 0.1f );\n glTexCoord2f( tex_u_max, tex_v_max );\t\tglVertex3f( 1.0f, 0.0f, 0.1f );\n glTexCoord2f( tex_u_max, 0.0f );\t\tglVertex3f( 1.0f, 1.0f, 0.1f );\n glTexCoord2f( 0.0f, 0.0f );\t\tglVertex3f( 0.0f, 1.0f, 0.1f );\n glEnd();\n glPopMatrix();\n\n {\n\t\t\t\t/*\tcheck for errors\t*/\n\t\t\t\tGLenum err_code = glGetError();\n\t\t\t\twhile( GL_NO_ERROR != err_code )\n\t\t\t\t{\n\t\t\t\t\tprintf( \"OpenGL Error @ %s: %i\", \"drawing loop\", err_code );\n\t\t\t\t\terr_code = glGetError();\n\t\t\t\t}\n\t\t\t}\n\n 
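// Present the finished back buffer (the pixel format requested below uses PFD_DOUBLEBUFFER), then yield the CPU briefly.\n            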
SwapBuffers(hDC);\n\n Sleep (1);\n }\n }\n\n //\tand show off the screenshot capability\n /*\n load_me += \"-screenshot.tga\";\n SOIL_save_screenshot( load_me.c_str(), SOIL_SAVE_TYPE_TGA, 0, 0, 512, 512 );\n //*/\n //*\n load_me += \"-screenshot.bmp\";\n SOIL_save_screenshot( load_me.c_str(), SOIL_SAVE_TYPE_BMP, 0, 0, 512, 512 );\n //*/\n /*\n load_me += \"-screenshot.dds\";\n SOIL_save_screenshot( load_me.c_str(), SOIL_SAVE_TYPE_DDS, 0, 0, 512, 512 );\n //*/\n\n // shutdown OpenGL\n DisableOpenGL(hwnd, hDC, hRC);\n\n // destroy the window explicitly\n DestroyWindow(hwnd);\n\n return msg.wParam;\n}\n\nLRESULT CALLBACK WindowProc(HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)\n{\n switch (uMsg)\n {\n case WM_CLOSE:\n PostQuitMessage(0);\n break;\n\n case WM_DESTROY:\n return 0;\n\n case WM_KEYDOWN:\n {\n switch (wParam)\n {\n case VK_ESCAPE:\n PostQuitMessage(0);\n break;\n }\n }\n break;\n\n default:\n return DefWindowProc(hwnd, uMsg, wParam, lParam);\n }\n\n return 0;\n}\n\nvoid EnableOpenGL(HWND hwnd, HDC* hDC, HGLRC* hRC)\n{\n PIXELFORMATDESCRIPTOR pfd;\n\n int iFormat;\n\n /* get the device context (DC) */\n *hDC = GetDC(hwnd);\n\n /* set the pixel format for the DC */\n ZeroMemory(&pfd, sizeof(pfd));\n\n pfd.nSize = sizeof(pfd);\n pfd.nVersion = 1;\n pfd.dwFlags = PFD_DRAW_TO_WINDOW |\n PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER;\n pfd.iPixelType = PFD_TYPE_RGBA;\n pfd.cColorBits = 24;\n pfd.cDepthBits = 16;\n pfd.iLayerType = PFD_MAIN_PLANE;\n\n iFormat = ChoosePixelFormat(*hDC, &pfd);\n\n SetPixelFormat(*hDC, iFormat, &pfd);\n\n /* create and enable the render context (RC) */\n *hRC = wglCreateContext(*hDC);\n\n wglMakeCurrent(*hDC, *hRC);\n}\n\nvoid DisableOpenGL (HWND hwnd, HDC hDC, HGLRC hRC)\n{\n wglMakeCurrent(NULL, NULL);\n wglDeleteContext(hRC);\n ReleaseDC(hwnd, hDC);\n}\n\n" }, { "alpha_fraction": 0.7082601189613342, "alphanum_fraction": 0.7152900099754333, "avg_line_length": 28.947368621826172, "blob_id": "0400f4e7c63f2356da2911ede253acc164c211e2", "content_id": "d646a7341f913c203953dbaad9d304fe7ba6e97c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1138, "license_type": "permissive", "max_line_length": 85, "num_lines": 38, "path": "/Sources/Core/Resources/Sound.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <audiere.h>\n\n#include \"IResource.hpp\"\n#include \"../Helpers/Holders/Polymorphic.hpp\"\n\nnamespace Core::Resources \n{\n\n class Sound final : public IResource, public Helpers::Holders::Polymorphic<Sound>\n {\n public:\n audiere::OutputStreamPtr getAudioStream() const noexcept;\n void setAudioStream(audiere::OutputStreamPtr&& audio_stream) noexcept;\n public:\n bool load(std::string_view sound_path) noexcept override;\n private:\n audiere::OutputStreamPtr m_audio_stream = nullptr;\n };\n\n}\n" }, { "alpha_fraction": 
0.7338510155677795, "alphanum_fraction": 0.7409107089042664, "avg_line_length": 37.297298431396484, "blob_id": "771177d33896631e5c609f1c3b9c36082c75288f", "content_id": "d6867c5564110c4c4ba5531579eee38e056fc2c4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 2833, "license_type": "permissive", "max_line_length": 117, "num_lines": 74, "path": "/Sources/CMakeLists.txt", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#\n# Copyright 2018 Vladimir Balun\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ncmake_minimum_required (VERSION 3.10)\n\nproject (RacingWorld)\n\nfile (GLOB_RECURSE SOURCES \"*.cpp\" \"*.hpp\")\n\nforeach (FILE ${SOURCES})\n get_filename_component (PARENT_DIR \"${FILE}\" DIRECTORY)\n string (REPLACE \"${CMAKE_CURRENT_SOURCE_DIR}\" \"\" GROUP \"${PARENT_DIR}\")\n string (REPLACE \"/\" \"\\\\\" GROUP \"${GROUP}\")\n\n if (\"${FILE}\" MATCHES \".*\\\\.cpp\")\n set (GROUP \"${GROUP}\")\n elseif (\"${FILE}\" MATCHES \".*\\\\.hpp\")\n set (GROUP \"${GROUP}\")\n endif ()\n\n source_group (\"${GROUP}\" FILES \"${FILE}\")\nendforeach ()\n\ninclude_directories (\n\t${PROJECT_SOURCES_DIR}\n\t${PROJECT_SOURCES_DIR}/Core\n\t${PROJECT_SOURCES_DIR}/Game\n\t${PROJECT_DEPENDENCIES_DIR}/Boost/Include \n\t${PROJECT_DEPENDENCIES_DIR}/LibXL/Include \n\t${PROJECT_DEPENDENCIES_DIR}/GLFW/Include \n\t${PROJECT_DEPENDENCIES_DIR}/GLEW/Include\n\t${PROJECT_DEPENDENCIES_DIR}/GLM/Include\n\t${PROJECT_DEPENDENCIES_DIR}/Audiere/Include\n\t${PROJECT_DEPENDENCIES_DIR}/OBJLoader/Include\n\t${PROJECT_DEPENDENCIES_DIR}/SOIL/Include\n)\n\nlink_directories (\n\t${PROJECT_DEPENDENCIES_DIR}/Boost/Libraries \n\t${PROJECT_DEPENDENCIES_DIR}/LibXL/Libraries \n\t${PROJECT_DEPENDENCIES_DIR}/GLFW/Libraries \n\t${PROJECT_DEPENDENCIES_DIR}/GLEW/Libraries\n\t${PROJECT_DEPENDENCIES_DIR}/GLM/Libraries\n\t${PROJECT_DEPENDENCIES_DIR}/Audiere/Libraries\n\t${PROJECT_DEPENDENCIES_DIR}/SOIL/Libraries\n)\n\ninclude (PrecompiledHeader.cmake)\n\nadd_executable (${PROJECT_NAME} ${SOURCES})\ntarget_link_libraries (${PROJECT_NAME} wsock32.lib opengl32.lib libxl.lib glfw3.lib glew32s.lib audiere.lib SOIL.lib)\ntarget_include_directories (${PROJECT_NAME} PRIVATE ${PROJECT_DEPENDENCIES_DIR}/OpenGL/Include)\nadd_precompiled_header (${PROJECT_NAME} PrecompiledHeader.hpp PrecompiledHeader.cpp)\nadd_definitions (-D_SILENCE_CXX17_ALLOCATOR_VOID_DEPRECATION_WARNING)\nset_target_properties (${PROJECT_NAME} PROPERTIES FOLDER \"Game\")\nset_target_properties (${PROJECT_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BIN_DIR})\nset_target_properties (${PROJECT_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_DEBUG ${PROJECT_BIN_DIR})\nset_target_properties (${PROJECT_NAME} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_RELEASE ${PROJECT_BIN_DIR})\nset_target_properties (${PROJECT_NAME} PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY ${PROJECT_BIN_DIR})\n\ninclude (CPack.cmake)" }, { "alpha_fraction": 0.6394004225730896, "alphanum_fraction": 0.6428265571594238, 
"avg_line_length": 27.475608825683594, "blob_id": "9d91e61f6d9028da54735562f6ac39803f08e8f4", "content_id": "52de456b0a33cf85a396d5029c3b997356632176", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2335, "license_type": "permissive", "max_line_length": 132, "num_lines": 82, "path": "/Sources/Core/Input/InputEventHandler.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"InputEventHandler.hpp\"\n\n#include \"MouseState.hpp\"\n#include \"KeyboardState.hpp\"\n#include \"../Helpers/Debug.hpp\"\n#include \"../Helpers/Macroses.hpp\"\n\nvoid Core::Input::onInputError(const int error, const char* description) noexcept\n{\n LOG_WARNING(STR(\"Input error:\" + STR(description)));\n}\n\nvoid Core::Input::onMouseMoveEvent(GLFWwindow* window, double x_pos, double y_pos) noexcept\n{\n g_mouse_state.setPosition(static_cast<int>(x_pos), static_cast<int>(y_pos));\n}\n\nvoid Core::Input::onMouseClickEvent(GLFWwindow* window, int button, int action, int mods) noexcept\n{\n \n}\n\nvoid Core::Input::onKeyboardEvent(GLFWwindow* window, const int key, const int scan_code, const int action, const int mods) noexcept\n{\n KeyboardState& keyboard_state = KeyboardState::getInstance();\n if (key == GLFW_KEY_W && action == GLFW_PRESS)\n {\n keyboard_state.pressKeyW();\n return;\n }\n if (key == GLFW_KEY_S && action == GLFW_PRESS)\n {\n keyboard_state.pressKeyS();\n return;\n }\n if (key == GLFW_KEY_A && action == GLFW_PRESS)\n {\n keyboard_state.pressKeyA();\n return;\n }\n if (key == GLFW_KEY_D && action == GLFW_PRESS)\n {\n keyboard_state.pressKeyD();\n return;\n }\n if (key == GLFW_KEY_W && action == GLFW_RELEASE)\n {\n keyboard_state.releaseKeyW();\n return;\n }\n if (key == GLFW_KEY_S && action == GLFW_RELEASE)\n {\n keyboard_state.releaseKeyS();\n return;\n }\n if (key == GLFW_KEY_A && action == GLFW_RELEASE)\n {\n keyboard_state.releaseKeyA();\n return;\n }\n if (key == GLFW_KEY_D && action == GLFW_RELEASE)\n {\n keyboard_state.releaseKeyD();\n }\n}\n" }, { "alpha_fraction": 0.7288135886192322, "alphanum_fraction": 0.7377866506576538, "avg_line_length": 33.58620834350586, "blob_id": "95e65da03adcf00953bef60b959d712594eb3b61", "content_id": "4cf0b1192a07e8082777afaee9d21f174fb3bea3", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1003, "license_type": "permissive", "max_line_length": 100, "num_lines": 29, "path": "/Sources/Core/Input/InputEventHandler.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * 
http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <GLFW/glfw3.h>\n\nnamespace Core::Input \n{\n\n void onInputError(int error, const char* description) noexcept;\n void onMouseMoveEvent(GLFWwindow* window, double x_pos, double y_pos) noexcept;\n void onMouseClickEvent(GLFWwindow* window, int button, int action, int mods) noexcept;\n void onKeyboardEvent(GLFWwindow* window, int key, int scan_code, int action, int mods) noexcept;\n\n}\n" }, { "alpha_fraction": 0.6627101898193359, "alphanum_fraction": 0.6854599118232727, "avg_line_length": 32.63333511352539, "blob_id": "9d062450ea794bd127fb167fb175343a1b5ebf2e", "content_id": "e02037160558f4800b5342a259c5e75812d5161f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1011, "license_type": "permissive", "max_line_length": 155, "num_lines": 30, "path": "/Dependencies/LibXL/Include/IFilterColumnT.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#ifndef LIBXL_IFILTERCOLUMN_H\n#define LIBXL_IFILTERCOLUMN_H\n\n#include \"setup.h\"\n#include \"enum.h\"\n\nnamespace libxl\n{\n template<class TCHAR>\n struct IFilterColumnT\n {\n virtual int XLAPIENTRY index() const = 0;\n\n virtual Filter XLAPIENTRY filterType() const = 0;\n\n virtual int XLAPIENTRY filterSize() const = 0;\n virtual const TCHAR* XLAPIENTRY filter(int index) const = 0;\n virtual void XLAPIENTRY addFilter(const TCHAR* value) = 0;\n\n virtual bool XLAPIENTRY getTop10(double* value, bool* top, bool* percent) = 0;\n virtual void XLAPIENTRY setTop10(double value, bool top = true, bool percent = false) = 0;\n\n virtual bool XLAPIENTRY getCustomFilter(Operator* op1, const TCHAR** v1, Operator* op2, const TCHAR** v2, bool* andOp) const = 0;\n virtual void XLAPIENTRY setCustomFilter(Operator op1, const TCHAR* v1, Operator op2 = OPERATOR_EQUAL, const TCHAR* v2 = 0, bool andOp = false) = 0;\n\n virtual void XLAPIENTRY clear() = 0;\n };\n}\n\n#endif\n\n\n" }, { "alpha_fraction": 0.7118186950683594, "alphanum_fraction": 0.7161359786987305, "avg_line_length": 22.756410598754883, "blob_id": "142615fb715ccb79e3038691dc6c39364aa5bb99", "content_id": "27b49448633359cea16714758462ff3fc347f847", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1853, "license_type": "permissive", "max_line_length": 75, "num_lines": 78, "path": "/Sources/Core/Input/KeyboardState.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include 
\"KeyboardState.hpp\"\n\nbool Core::Input::KeyboardState::isPressedKeyW() const noexcept\n{\n return m_is_pressed_key_w.load();\n}\n\nbool Core::Input::KeyboardState::isPressedKeyS() const noexcept\n{\n return m_is_pressed_key_s.load();\n}\n\nbool Core::Input::KeyboardState::isPressedKeyA() const noexcept\n{\n return m_is_pressed_key_a.load();\n}\n\nbool Core::Input::KeyboardState::isPressedKeyD() const noexcept\n{\n return m_is_pressed_key_d.load();\n}\n\nvoid Core::Input::KeyboardState::pressKeyW() noexcept\n{\n m_is_pressed_key_w.store(true);\n}\n\nvoid Core::Input::KeyboardState::pressKeyS() noexcept\n{\n m_is_pressed_key_s.store(true);\n}\n\nvoid Core::Input::KeyboardState::pressKeyA() noexcept\n{\n m_is_pressed_key_a.store(true);\n}\n\nvoid Core::Input::KeyboardState::pressKeyD() noexcept\n{\n m_is_pressed_key_d.store(true);\n}\n\nvoid Core::Input::KeyboardState::releaseKeyW() noexcept\n{\n m_is_pressed_key_w.store(false);\n}\n\nvoid Core::Input::KeyboardState::releaseKeyS() noexcept\n{\n m_is_pressed_key_s.store(false);\n}\n\nvoid Core::Input::KeyboardState::releaseKeyA() noexcept\n{\n m_is_pressed_key_a.store(false);\n}\n\nvoid Core::Input::KeyboardState::releaseKeyD() noexcept\n{\n m_is_pressed_key_d.store(false);\n}\n" }, { "alpha_fraction": 0.6163182854652405, "alphanum_fraction": 0.6227242350578308, "avg_line_length": 32.70454406738281, "blob_id": "f727cc578c355d1c6fe8541badd2c18833fdaa8a", "content_id": "2338f775305a2eca889b8ca425e6419f9c598643", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2966, "license_type": "permissive", "max_line_length": 101, "num_lines": 88, "path": "/Sources/Core/Resources/Model.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <vector>\n#include <unordered_map>\n#include <glm/vec2.hpp>\n#include <glm/vec3.hpp>\n\n#include \"IResource.hpp\"\n#include \"../Helpers/Holders/Polymorphic.hpp\"\n\nnamespace Core::Resources \n{\n\n class Model final : public IResource, public Helpers::Holders::Polymorphic<Model>\n {\n public:\n\n struct Vertex\n {\n public:\n Vertex() noexcept = default;\n Vertex(glm::vec3&& normal, glm::vec3&& position, glm::vec2&& texture_coordinate) noexcept\n : m_normal(normal), m_position(position), m_texture_coordinate(texture_coordinate) {}\n const glm::vec3& getNormal() const noexcept;\n const glm::vec3& getPosition() const noexcept;\n const glm::vec2& getTextureCoordinate() const noexcept;\n private:\n glm::vec3 m_normal{};\n glm::vec3 m_position{};\n glm::vec2 m_texture_coordinate{};\n };\n\n class Object\n {\n public:\n Object() noexcept = default;\n Object(std::size_t count_vertices, std::size_t count_indices) noexcept;\n template<typename... Args>\n void emplaceVertex(Args... 
args);\n void addIndex(unsigned int index) noexcept;\n void setName(const std::string& name) noexcept;\n void setMaterialName(const std::string& name) noexcept;\n const std::string& getName() const noexcept;\n const std::string& getMaterialName() const noexcept;\n const std::vector<Vertex>& getVertices() const noexcept;\n const std::vector<unsigned int>& getIndices() const noexcept;\n private:\n std::string m_name{};\n std::string m_material_name{};\n std::vector<Vertex> m_vertices{};\n std::vector<unsigned int> m_indices{};\n };\n\n public:\n Model() noexcept = default;\n explicit Model(std::size_t count_objects) noexcept;\n void addObject(Object&& object);\n const std::vector<Object>& getObjects() const noexcept;\n public:\n bool load(std::string_view model_path) noexcept override;\n private:\n std::vector<Object> m_objects{};\n };\n\n template<typename... Args>\n void Model::Object::emplaceVertex(Args... args)\n {\n m_vertices.emplace_back(std::forward<Args>(args)...);\n }\n\n}\n" }, { "alpha_fraction": 0.7210056781768799, "alphanum_fraction": 0.7242497801780701, "avg_line_length": 23.176469802856445, "blob_id": "f9139cb6570b714318821976f90b96326ac1e6bc", "content_id": "e649c1c94a8dfb218dcbcf79b1f42ee3f7c8a9e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3699, "license_type": "permissive", "max_line_length": 107, "num_lines": 153, "path": "/Sources/Core/Resources/Map.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Map.hpp\"\n\n#include \"Loaders/MapLoader.hpp\"\n\n#pragma region MapObject\n\nvoid Core::Resources::Map::MapObject::setScale(float scale) noexcept\n{\n m_scale = scale;\n}\n\nvoid Core::Resources::Map::MapObject::setXRotation(float rotation) noexcept\n{\n m_x_rotation = rotation;\n}\n\nvoid Core::Resources::Map::MapObject::setYRotation(float rotation) noexcept\n{\n m_y_rotation = rotation;\n}\n\nvoid Core::Resources::Map::MapObject::setZRotation(float rotation) noexcept\n{\n m_z_rotation = rotation;\n}\n\nvoid Core::Resources::Map::MapObject::setPosition(const bg::point3f_t& position) noexcept\n{\n m_position = position;\n}\n\nfloat Core::Resources::Map::MapObject::getScale() const noexcept\n{\n return m_scale;\n}\n\nfloat Core::Resources::Map::MapObject::getXRotation() const noexcept\n{\n return m_x_rotation;\n}\n\nfloat Core::Resources::Map::MapObject::getYRotation() const noexcept\n{\n return m_y_rotation;\n}\n\nfloat Core::Resources::Map::MapObject::getZRotation() const noexcept\n{\n return m_z_rotation;\n}\n\nconst bg::point3f_t& Core::Resources::Map::MapObject::getPosition() const noexcept\n{\n return m_position;\n}\n\n#pragma endregion\n\n#pragma region Map\n\nvoid Core::Resources::Map::setDimensions(const bg::box2f_t& dimensions) noexcept\n{\n m_map_dimensions = dimensions;\n}\n\nvoid 
Core::Resources::Map::setTreeModelsName(const std::string& model_name)\n{\n m_tree_models_name = model_name;\n}\n\nvoid Core::Resources::Map::setHouseModelsName(const std::string& model_name)\n{\n m_house_models_name = model_name;\n}\n\nvoid Core::Resources::Map::setGroundModelsName(const std::string& model_name)\n{\n m_ground_models_name = model_name;\n}\n\nvoid Core::Resources::Map::setTreeObjects(std::vector<MapObject>&& map_objects) noexcept\n{\n m_tree_objects = std::move(map_objects);\n}\n\nvoid Core::Resources::Map::setHouseObjects(std::vector<MapObject>&& map_objects) noexcept\n{\n m_house_objects = std::move(map_objects);\n}\n\nvoid Core::Resources::Map::setGroundObjects(std::vector<MapObject>&& map_objects) noexcept\n{\n m_ground_objects = std::move(map_objects);\n}\n\nconst bg::box2f_t& Core::Resources::Map::getDimensions() const noexcept\n{\n return m_map_dimensions;\n}\n\nstd::string_view Core::Resources::Map::getTreeModelsName() const noexcept\n{\n return m_tree_models_name;\n}\n\nstd::string_view Core::Resources::Map::getHouseModelsName() const noexcept\n{\n return m_house_models_name;\n}\n\nstd::string_view Core::Resources::Map::getGroundModelsName() const noexcept\n{\n return m_ground_models_name;\n}\n\nconst std::vector<Core::Resources::Map::MapObject>& Core::Resources::Map::getTreeObjects() const noexcept\n{\n return m_tree_objects;\n}\n\nconst std::vector<Core::Resources::Map::MapObject>& Core::Resources::Map::getHouseObjects() const noexcept\n{\n return m_house_objects;\n}\n\nconst std::vector<Core::Resources::Map::MapObject>& Core::Resources::Map::getGroundObjects() const noexcept\n{\n return m_ground_objects;\n}\n\nbool Core::Resources::Map::load(std::string_view map_path) noexcept\n{\n return Loaders::MapLoader::load(*this, map_path);\n}\n\n#pragma endregion\n" }, { "alpha_fraction": 0.6805071830749512, "alphanum_fraction": 0.6824962496757507, "avg_line_length": 33.97391128540039, "blob_id": "fd5c849dc8222b3f148e213a2898aab9644ccd71", "content_id": "d419ef3898b7204e61ebc480de836867c4b83bef", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4022, "license_type": "permissive", "max_line_length": 122, "num_lines": 115, "path": "/Sources/Core/Managers/ConfigurationManager.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"ConfigurationManager.hpp\"\n\n#include <boost/property_tree/ptree.hpp>\n#include <boost/property_tree/xml_parser.hpp>\n\n#include \"../Helpers/Time.hpp\"\n#include \"../Helpers/Debug.hpp\"\n#include \"../Helpers/Macroses.hpp\"\n\nvoid Core::Managers::ConfigurationManager::initialize()\n{\n#ifdef _DEBUG\n const auto start_time = Helpers::getCurrentTimeInMilliseconds<double>();\n#endif // _DEBUG\n\n using namespace boost::property_tree;\n const std::string configuration_filename = \"Configuration.xml\";\n 
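// Note: getResourcesPath() (defined later in this file) appends an empty path element, so the returned\n    // directory already ends with a separator and plain string concatenation forms a valid file path.\n    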
const std::string configuration_file_full_path = STR(getResourcesPath()) + configuration_filename;\n\n    try\n    {\n        ptree xml_configuration{};\n        read_xml(configuration_file_full_path, xml_configuration);\n        for (const auto& xml_data : xml_configuration.get_child(\"configuration\"))\n        {\n            if (xml_data.first == \"language\")\n            {\n                m_current_language = xml_data.second.get<std::string>(\"<xmlattr>.data\", \"\");\n            }\n            if (xml_data.first == \"locales-config-filename\")\n            {\n                m_locales_file_configuration_path = xml_data.second.get<std::string>(\"<xmlattr>.filename\", \"\");\n            }\n            if (xml_data.first == \"player-config-filename\")\n            {\n                m_player_file_configuration_path = xml_data.second.get<std::string>(\"<xmlattr>.filename\", \"\");\n            }\n            if (xml_data.first == \"resources-config-filename\")\n            {\n                m_resources_file_configuration_path = xml_data.second.get<std::string>(\"<xmlattr>.filename\", \"\");\n            }\n        }\n    }\n    catch (const xml_parser_error&)\n    {\n        LOG_ERROR(\"'ConfigurationManager' was not initialized.\");\n    }\n\n#ifdef _DEBUG\n    const auto end_time = Helpers::getCurrentTimeInMilliseconds<double>();\n    const auto loading_time = end_time - start_time;\n    LOG_PROFILING(\"'ConfigurationManager' was initialized in \" + TO_STR(loading_time) + \"ms.\");\n#endif // _DEBUG\n}\n\nstd::string Core::Managers::ConfigurationManager::getCurrentLanguage() const noexcept\n{\n    return m_current_language;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getMapsPath() const noexcept\n{\n    static const std::string models_path = STR(getResourcesPath()) + \"/Maps/\";\n    return models_path;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getModelsPath() const noexcept\n{\n    static const std::string models_path = STR(getResourcesPath()) + \"/Models/\";\n    return models_path;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getShadersPath() const noexcept\n{\n    static const std::string shaders_path = STR(getResourcesPath()) + \"/Shaders/\";\n    return shaders_path;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getResourcesPath() const noexcept\n{\n    static const std::string resources_path = (std::filesystem::current_path().parent_path() / \"Resources\" / \"\").string();\n    return resources_path;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getLocalesConfigurationFilename() const noexcept\n{\n    return m_locales_file_configuration_path;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getPlayerConfigurationFilename() const noexcept\n{\n    return m_player_file_configuration_path;\n}\n\nstd::string_view Core::Managers::ConfigurationManager::getResourcesConfigurationFilename() const noexcept\n{\n    return m_resources_file_configuration_path;\n}\n" }, { "alpha_fraction": 0.6778324842453003, "alphanum_fraction": 0.6857143044471741, "avg_line_length": 25.0256404876709, "blob_id": "2d48293dab47afa5de6aa7b97dfabafa3a294033", "content_id": "64ca14d8dc9e32c95f9b9ad6562e87b1fbd8d10a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1015, "license_type": "permissive", "max_line_length": 75, "num_lines": 39, "path": "/Sources/Core/GUI/Window.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless 
required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n\ntypedef struct GLFWwindow GLFWwindow;\n\nnamespace Core::GUI\n{\n \n class Window\n {\n public:\n Window(int width, int height, const std::string& title) noexcept;\n void show() noexcept;\n ~Window();\n private:\n void initGLContext() const noexcept;\n void initEventHandlers() const noexcept;\n private:\n GLFWwindow* m_window = nullptr;\n };\n\n}\n" }, { "alpha_fraction": 0.7497955560684204, "alphanum_fraction": 0.7645134925842285, "avg_line_length": 33.97142791748047, "blob_id": "84f09ad8c8f80353380562cd90f5b82392b06ba5", "content_id": "8b28e010f8f8f8b1dd2881a3d5c7a64ce797bd42", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1223, "license_type": "permissive", "max_line_length": 101, "num_lines": 35, "path": "/Sources/CPack.cmake", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#\n# Copyright 2018 Vladimir Balun\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ncmake_minimum_required (VERSION 3.10)\n\ninclude(InstallRequiredSystemLibraries)\n\ninstall (TARGETS ${PROJECT_NAME} RUNTIME DESTINATION Bin)\ninstall (DIRECTORY ../Resources DESTINATION ./)\n\nset (CPACK_GENERATOR NSIS)\nset (CPACK_PACKAGE_NAME \"RacingWorld\")\nset (CPACK_PACKAGE_VENDOR \"Vladimir Balun\")\nset (CPACK_PACKAGE_DESCRIPTION_SUMMARY \"RacingWorld - is a multiplayer online 3D game about racing.\")\nset (CPACK_PACKAGE_VERSION \"0.1.0\")\nset (CPACK_PACKAGE_VERSION_MAJOR \"0\")\nset (CPACK_PACKAGE_VERSION_MINOR \"1\")\nset (CPACK_PACKAGE_VERSION_PATCH \"0\")\nset (CPACK_PACKAGE_INSTALL_DIRECTORY \"RacingWorld\")\nSET (CPACK_NSIS_MODIFY_PATH ON)\n\nINCLUDE(CPack)" }, { "alpha_fraction": 0.6843288540840149, "alphanum_fraction": 0.6984585523605347, "avg_line_length": 30.139999389648438, "blob_id": "4927feb5160f2ceb40c5a1a3d869b02232722f14", "content_id": "20bb0cabd46e73502d95f907063d5ecd355757e2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3114, "license_type": "permissive", "max_line_length": 121, "num_lines": 100, "path": "/Sources/Core/Graphics/Camera.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Camera.hpp\"\n\n#include <GLFW/glfw3.h>\n#include <glm/ext/matrix_transform.inl>\n#include <glm/ext/matrix_clip_space.inl>\n#include <glm/detail/func_trigonometric.inl>\n\nvoid Core::Graphics::Camera::setSpeed(GLfloat speed) noexcept\n{\n    m_speed = speed;\n}\n\nvoid Core::Graphics::Camera::moveLeft() noexcept\n{\n    glm::vec3 right_direction = cross(m_forward_direction, m_up_direction);\n    right_direction = glm::normalize(right_direction); // glm::normalize returns a normalized copy, so the result must be assigned back\n    right_direction *= m_speed;\n    m_position -= right_direction;\n}\n\nvoid Core::Graphics::Camera::moveRight() noexcept\n{\n    glm::vec3 right_direction = cross(m_forward_direction, m_up_direction);\n    right_direction = glm::normalize(right_direction);\n    right_direction *= m_speed;\n    m_position += right_direction;\n}\n\nvoid Core::Graphics::Camera::moveForward() noexcept\n{\n    m_position += m_forward_direction * m_speed;\n}\n\nvoid Core::Graphics::Camera::moveBackward() noexcept\n{\n    m_position -= m_forward_direction * m_speed;\n}\n\nvoid Core::Graphics::Camera::turn(GLint x_offset, GLint y_offset) noexcept\n{\n    constexpr float SENSITIVITY = 0.05f;\n    constexpr float PITCH_MIN_ANGLE = -89.0f;\n    constexpr float PITCH_MAX_ANGLE = 89.0f;\n\n    m_yaw_angle += x_offset * SENSITIVITY;\n    m_pitch_angle += y_offset * SENSITIVITY;\n\n    if (m_pitch_angle > PITCH_MAX_ANGLE) \n    {\n        m_pitch_angle = PITCH_MAX_ANGLE;\n    }\n    if (m_pitch_angle < PITCH_MIN_ANGLE) \n    {\n        m_pitch_angle = PITCH_MIN_ANGLE;\n    }\n\n    const float pitch_angle_per_radians = glm::radians(m_pitch_angle);\n    const float yaw_angle_per_radians = glm::radians(m_yaw_angle);\n    const float pitch_angle_cosine = cos(pitch_angle_per_radians);\n    m_forward_direction.x = pitch_angle_cosine * cos(yaw_angle_per_radians);\n    m_forward_direction.y = sin(pitch_angle_per_radians);\n    m_forward_direction.z = pitch_angle_cosine * sin(yaw_angle_per_radians);\n    m_forward_direction = glm::normalize(m_forward_direction);\n}\n\nconst glm::vec3& Core::Graphics::Camera::getPosition() const noexcept\n{\n    return m_position;\n}\n\nglm::mat4x4 Core::Graphics::Camera::getViewMatrix() const noexcept\n{\n    return lookAt(m_position, m_position + m_forward_direction, m_up_direction);\n}\n\nglm::mat4x4 Core::Graphics::Camera::getProjectionMatrix() const noexcept\n{\n    glm::mat4x4 perspective_matrix{ 1.0f };\n    const int window_width = 860; // TODO\n    const int window_height = 600; // TODO\n    perspective_matrix = glm::perspective(glm::radians(45.0f), (float)window_width / (float)window_height, 0.1f, 100.0f);\n    return perspective_matrix;\n}\n" }, { "alpha_fraction": 0.7171922922134399, "alphanum_fraction": 0.7212614417076111, "avg_line_length": 27.08571434020996, "blob_id": "7f97cdc98ceaaf586cc3b8264a53726018f5d46b", "content_id": "f032f0ecf00c8f6b764bc35389e4b12e36a1addd", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1966, "license_type": "permissive", "max_line_length": 84, "num_lines": 70, "path": "/Sources/Core/Input/MouseState.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed 
under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"MouseState.hpp\"\n\nbool Core::Input::MouseState::isPressedLeftButton() const noexcept\n{\n return m_is_pressed_left_button.load();\n}\n\nbool Core::Input::MouseState::isPressedRightButton() const noexcept\n{\n return m_is_pressed_right_button.load();\n}\n\nint Core::Input::MouseState::getAndUnsetXDisplacementOffset() noexcept\n{\n const int offset = m_x_position.load() - m_last_x_position.load();\n m_last_x_position.store(m_x_position);\n return offset;\n}\n\nint Core::Input::MouseState::getAndUnsetYDisplacementOffset() noexcept\n{\n const int offset = m_last_y_position.load() - m_y_position.load();\n m_last_y_position.store(m_y_position);\n return offset;\n}\n\nvoid Core::Input::MouseState::pressLeftButton() noexcept\n{\n m_is_pressed_left_button.store(true);\n}\n\nvoid Core::Input::MouseState::pressRightButton() noexcept\n{\n m_is_pressed_right_button.store(true);\n}\n\nvoid Core::Input::MouseState::releaseLeftButton() noexcept\n{\n m_is_pressed_left_button.store(false);\n}\n\nvoid Core::Input::MouseState::releaseRightButton() noexcept\n{\n m_is_pressed_right_button.store(false);\n}\n\nvoid Core::Input::MouseState::setPosition(const int x_pos, const int y_pos) noexcept\n{\n m_last_x_position.store(m_x_position);\n m_last_y_position.store(m_y_position);\n m_x_position.store(x_pos);\n m_y_position.store(y_pos);\n}\n" }, { "alpha_fraction": 0.6675993204116821, "alphanum_fraction": 0.6720761060714722, "avg_line_length": 26.07575798034668, "blob_id": "7e58c3a04928f7046f71e914b46039f195bc6f72", "content_id": "fce5bbe0f2f31db389b0264e661346863553ea58", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1787, "license_type": "permissive", "max_line_length": 96, "num_lines": 66, "path": "/Scripts/Build.py", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "#\n# Copyright 2018 Vladimir Balun\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nimport shutil\nimport argparse\nimport subprocess\n\n\nBUILD_DIR = '../Build'\n\n\ndef is_build_with_clean():\n parser = argparse.ArgumentParser(description='Script to build the solution.')\n parser.add_argument('-c', '--clean', action='store_true', help='clean old solution version')\n args = parser.parse_args()\n return args.clean\n\n\ndef remove_old_solution():\n try:\n shutil.rmtree(BUILD_DIR)\n return True\n except OSError:\n return False\n\n\ndef is_program_installed(program_name):\n return which(program_name)\n\n\ndef run_command(cmd):\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n process.wait()\n for line in process.stdout:\n print(line)\n\n\nif __name__ == '__main__':\n if is_build_with_clean():\n if remove_old_solution():\n print('Old 
solution cleaned successfully.')\n        else:\n            sys.stderr.write('Old solution folder was not cleaned.\\n')\n            sys.stderr.write('New solution will not be generated.')\n            exit()\n\n    if not is_program_installed('cmake'):\n        sys.stderr.write('CMake is not installed on your PC.\\n')\n        exit()\n\n    # Generate the solution into BUILD_DIR (explicit -S/-B paths require CMake 3.13+)\n    command = 'cmake -S .. -B ' + BUILD_DIR\n    run_command(command)\n" }, { "alpha_fraction": 0.7269999980926514, "alphanum_fraction": 0.7360000014305115, "avg_line_length": 31.25, "blob_id": "0ef0143ca8f72b0eb5fb7784aa9e464813acf6a7", "content_id": "d2ea810d7f115fd317464c36ec39e7fbeb285c8b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1000, "license_type": "permissive", "max_line_length": 75, "num_lines": 32, "path": "/Sources/Core/Graphics/FrameCalculator.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *     http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"FrameCalculator.hpp\"\n\n#include <GLFW/glfw3.h>\n\nvoid Core::Graphics::FrameCalculator::onChangingFrame() noexcept\n{\n    const float current_time = static_cast<float>(glfwGetTime());\n    m_deltaTime = current_time - m_lastFrameTime;\n    m_lastFrameTime = current_time;\n}\n\nfloat Core::Graphics::FrameCalculator::getDeltaTime() const noexcept\n{\n    return m_deltaTime;\n}\n" }, { "alpha_fraction": 0.6331311464309692, "alphanum_fraction": 0.638450026512146, "avg_line_length": 27.23130989074707, "blob_id": "cff53b2f2403a51422fffe7fb5e8b1c595190af0", "content_id": "c1b1ee8057b42fc9bbb6e7f92792fce18c047203", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 44182, "license_type": "permissive", "max_line_length": 135, "num_lines": 1565, "path": "/Dependencies/Audiere/Include/audiere.h", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/**\n * @file\n *\n * Audiere Sound System\n * Version 1.9.4\n * (c) 2001-2003 Chad Austin\n *\n * This API uses principles explained at\n * http://aegisknight.org/cppinterface.html\n *\n * This code licensed under the terms of the LGPL. 
See doc/license.txt.\n *\n *\n * Note: When compiling this header in gcc, you may want to use the\n * -Wno-non-virtual-dtor flag to get rid of those annoying \"class has\n * virtual functions but no virtual destructor\" warnings.\n *\n * This file is structured as follows:\n * - includes, macro definitions, other general setup\n * - interface definitions\n * - DLL-safe entry points (not for general use)\n * - inline functions that use those entry points\n */\n\n#ifndef AUDIERE_H\n#define AUDIERE_H\n\n\n#include <vector>\n#include <string>\n\n#ifdef _MSC_VER\n#pragma warning(disable : 4786)\n#endif\n\n\n#ifndef __cplusplus\n #error Audiere requires C++\n#endif\n\n\n// DLLs in Windows should use the standard (Pascal) calling convention\n#ifndef ADR_CALL\n #if defined(WIN32) || defined(_WIN32)\n #define ADR_CALL __stdcall\n #else\n #define ADR_CALL\n #endif\n#endif\n\n// Export functions from the DLL\n#ifndef ADR_DECL\n# if defined(WIN32) || defined(_WIN32)\n# ifdef AUDIERE_EXPORTS\n# define ADR_DECL __declspec(dllexport)\n# else\n# define ADR_DECL __declspec(dllimport)\n# endif\n# else\n# define ADR_DECL\n# endif\n#endif\n\n\n\n#define ADR_FUNCTION(ret) extern \"C\" ADR_DECL ret ADR_CALL\n#define ADR_METHOD(ret) virtual ret ADR_CALL\n\n\nnamespace audiere {\n\n class RefCounted {\n protected:\n /**\n * Protected so users of refcounted classes don't use std::auto_ptr\n * or the delete operator.\n *\n * Interfaces that derive from RefCounted should define an inline,\n * empty, protected destructor as well.\n */\n ~RefCounted() { }\n\n public:\n /**\n * Add a reference to the internal reference count.\n */\n ADR_METHOD(void) ref() = 0;\n\n /**\n * Remove a reference from the internal reference count. When this\n * reaches 0, the object is destroyed.\n */\n ADR_METHOD(void) unref() = 0;\n };\n\n\n template<typename T>\n class RefPtr {\n public:\n RefPtr(T* ptr = 0) {\n m_ptr = 0;\n *this = ptr;\n }\n\n RefPtr(const RefPtr<T>& ptr) {\n m_ptr = 0;\n *this = ptr;\n }\n\n ~RefPtr() {\n if (m_ptr) {\n m_ptr->unref();\n m_ptr = 0;\n }\n }\n \n RefPtr<T>& operator=(T* ptr) {\n if (ptr != m_ptr) {\n if (m_ptr) {\n m_ptr->unref();\n }\n m_ptr = ptr;\n if (m_ptr) {\n m_ptr->ref();\n }\n }\n return *this;\n }\n\n RefPtr<T>& operator=(const RefPtr<T>& ptr) {\n *this = ptr.m_ptr;\n return *this;\n }\n\n T* operator->() const {\n return m_ptr;\n }\n\n T& operator*() const {\n return *m_ptr;\n }\n\n operator bool() const {\n return (m_ptr != 0);\n }\n\n T* get() const {\n return m_ptr;\n }\n\n private:\n T* m_ptr;\n };\n\n\n template<typename T, typename U>\n bool operator==(const RefPtr<T>& a, const RefPtr<U>& b) {\n return (a.get() == b.get());\n }\n\n template<typename T>\n bool operator==(const RefPtr<T>& a, const T* b) {\n return (a.get() == b);\n }\n\n template<typename T>\n bool operator==(const T* a, const RefPtr<T>& b) {\n return (a == b.get());\n }\n \n\n template<typename T, typename U>\n bool operator!=(const RefPtr<T>& a, const RefPtr<U>& b) {\n return (a.get() != b.get());\n }\n\n template<typename T>\n bool operator!=(const RefPtr<T>& a, const T* b) {\n return (a.get() != b);\n }\n\n template<typename T>\n bool operator!=(const T* a, const RefPtr<T>& b) {\n return (a != b.get());\n }\n\n\n /**\n * A basic implementation of the RefCounted interface. 
Derive\n * your implementations from RefImplementation<YourInterface>.\n */\n template<class Interface>\n class RefImplementation : public Interface {\n protected:\n RefImplementation() {\n m_ref_count = 0;\n }\n\n /**\n * So the implementation can put its destruction logic in the destructor,\n * as natural C++ code does.\n */\n virtual ~RefImplementation() { }\n\n public:\n void ADR_CALL ref() {\n ++m_ref_count;\n }\n\n void ADR_CALL unref() {\n if (--m_ref_count == 0) {\n delete this;\n }\n }\n\n private:\n int m_ref_count;\n };\n\n\n /**\n * Represents a random-access file, usually stored on a disk. Files\n * are always binary: that is, they do no end-of-line\n * transformations. File objects are roughly analogous to ANSI C\n * FILE* objects.\n *\n * This interface is not synchronized.\n */\n class File : public RefCounted {\n protected:\n ~File() { }\n\n public:\n /**\n * The different ways you can seek within a file.\n */\n enum SeekMode {\n BEGIN,\n CURRENT,\n END,\n };\n\n /**\n * Read size bytes from the file, storing them in buffer.\n *\n * @param buffer buffer to read into\n * @param size number of bytes to read\n *\n * @return number of bytes successfully read\n */\n ADR_METHOD(int) read(void* buffer, int size) = 0;\n\n /**\n * Jump to a new position in the file, using the specified seek\n * mode. Remember: if mode is END, the position must be negative,\n * to seek backwards from the end of the file into its contents.\n * If the seek fails, the current position is undefined.\n *\n * @param position position relative to the mode\n * @param mode where to seek from in the file\n *\n * @return true on success, false otherwise\n */\n ADR_METHOD(bool) seek(int position, SeekMode mode) = 0;\n\n /**\n * Get current position within the file.\n *\n * @return current position\n */\n ADR_METHOD(int) tell() = 0;\n };\n typedef RefPtr<File> FilePtr;\n\n\n /// Storage formats for sample data.\n enum SampleFormat {\n SF_U8, ///< unsigned 8-bit integer [0,255]\n SF_S16, ///< signed 16-bit integer in host endianness [-32768,32767]\n };\n\n\n /// Supported audio file formats.\n enum FileFormat {\n FF_AUTODETECT,\n FF_WAV,\n FF_OGG,\n FF_FLAC,\n FF_MP3,\n FF_MOD,\n FF_AIFF,\n FF_SPEEX,\n };\n\n\n /**\n * Source of raw PCM samples. Sample sources have an intrinsic format\n * (@see SampleFormat), sample rate, and number of channels. They can\n * be read from or reset.\n *\n * Some sample sources are seekable. Seekable sources have two additional\n * properties: length and position. Length is read-only.\n *\n * This interface is not synchronized.\n */\n class SampleSource : public RefCounted {\n protected:\n ~SampleSource() { }\n\n public:\n /**\n * Retrieve the number of channels, sample rate, and sample format of\n * the sample source.\n */\n ADR_METHOD(void) getFormat(\n int& channel_count,\n int& sample_rate,\n SampleFormat& sample_format) = 0;\n\n /**\n * Read frame_count samples into buffer. buffer must be at least\n * |frame_count * GetSampleSize(format) * channel_count| bytes long.\n *\n * @param frame_count number of frames to read\n * @param buffer buffer to store samples in\n *\n * @return number of frames actually read\n */\n ADR_METHOD(int) read(int frame_count, void* buffer) = 0;\n\n /**\n * Reset the sample source. This has the same effect as setPosition(0)\n * on a seekable source. 
On an unseekable source, it resets all internal\n * state to the way it was when the source was first created.\n */\n ADR_METHOD(void) reset() = 0;\n\n /**\n * @return true if the stream is seekable, false otherwise\n */\n ADR_METHOD(bool) isSeekable() = 0;\n\n /**\n * @return number of frames in the stream, or 0 if the stream is not\n * seekable\n */\n ADR_METHOD(int) getLength() = 0;\n \n /**\n * Sets the current position within the sample source. If the stream\n * is not seekable, this method does nothing.\n *\n * @param position current position in frames\n */\n ADR_METHOD(void) setPosition(int position) = 0;\n\n /**\n * Returns the current position within the sample source.\n *\n * @return current position in frames\n */\n ADR_METHOD(int) getPosition() = 0;\n\n /**\n * @return true if the sample source is set to repeat\n */\n ADR_METHOD(bool) getRepeat() = 0;\n\n /**\n * Sets whether the sample source should repeat or not. Note that not\n * all sample sources repeat by starting again at the beginning of the\n * sound. For example MOD files can contain embedded loop points.\n *\n * @param repeat true if the source should repeat, false otherwise\n */\n ADR_METHOD(void) setRepeat(bool repeat) = 0;\n\n /// Returns number of metadata tags present in this sample source.\n ADR_METHOD(int) getTagCount() = 0;\n\n /**\n * Returns the key of the i'th tag in the source. If the tag is\n * \"author=me\", the key is \"author\".\n */\n virtual const char* ADR_CALL getTagKey(int i) = 0;\n\n /**\n * Returns the value of the i'th tag in the source. If the tag is\n * \"author=me\", the value is \"me\".\n */\n virtual const char* ADR_CALL getTagValue(int i) = 0;\n\n /**\n * Returns the type of the i'th tag in the source. The type is where\n * the tag comes from, i.e. \"ID3v1\", \"ID3v2\", or \"vorbis\".\n */\n virtual const char* ADR_CALL getTagType(int i) = 0;\n };\n typedef RefPtr<SampleSource> SampleSourcePtr;\n\n\n /**\n * LoopPointSource is a wrapper around another SampleSource, providing\n * custom loop behavior. LoopPointSource maintains a set of links\n * within the sample stream and whenever the location of one of the links\n * (i.e. a loop point) is reached, the stream jumps to that link's target.\n * Each loop point maintains a count. Every time a loop point comes into\n * effect, the count is decremented. Once it reaches zero, that loop point\n * is temporarily disabled. If a count is not a positive value, it\n * cannot be disabled. Calling reset() resets all counts to their initial\n * values.\n *\n * Loop points only take effect when repeating has been enabled via the\n * setRepeat() method.\n *\n * Loop points are stored in sorted order by their location. Each one\n * has an index based on its location within the list. A loop point's\n * index will change if another is added before it.\n *\n * There is always one implicit loop point after the last sample that\n * points back to the first. That way, this class's default looping\n * behavior is the same as a standard SampleSource. This loop point\n * does not show up in the list.\n */\n class LoopPointSource : public SampleSource {\n protected:\n ~LoopPointSource() { }\n\n public:\n /**\n * Adds a loop point to the stream. If a loop point at 'location'\n * already exists, the new one replaces it. 
Location and target are\n * clamped to the actual length of the stream.\n *\n * @param location frame where loop occurs\n * @param target frame to jump to after loop point is hit\n * @param loopCount number of times to execute this jump.\n */\n ADR_METHOD(void) addLoopPoint(\n int location, int target, int loopCount) = 0;\n\n /**\n * Removes the loop point at index 'index' from the stream.\n *\n * @param index index of the loop point to remove\n */\n ADR_METHOD(void) removeLoopPoint(int index) = 0;\n\n /**\n * Returns the number of loop points in this stream.\n */\n ADR_METHOD(int) getLoopPointCount() = 0;\n\n /**\n * Retrieves information about a specific loop point.\n *\n * @param index index of the loop point\n * @param location frame where loop occurs\n * @param target loop point's target frame\n * @param loopCount number of times to loop from this particular point\n *\n * @return true if the index is valid and information is returned\n */\n ADR_METHOD(bool) getLoopPoint(\n int index, int& location, int& target, int& loopCount) = 0;\n };\n typedef RefPtr<LoopPointSource> LoopPointSourcePtr;\n\n\n /**\n * A connection to an audio device. Multiple output streams are\n * mixed by the audio device to produce the final waveform that the\n * user hears.\n *\n * Each output stream can be independently played and stopped. They\n * also each have a volume from 0.0 (silence) to 1.0 (maximum volume).\n */\n class OutputStream : public RefCounted {\n protected:\n ~OutputStream() { }\n\n public:\n /**\n * Start playback of the output stream. If the stream is already\n * playing, this does nothing.\n */\n ADR_METHOD(void) play() = 0;\n\n /**\n * Stop playback of the output stream. If the stream is already\n * stopped, this does nothing.\n */\n ADR_METHOD(void) stop() = 0;\n\n /**\n * @return true if the output stream is playing, false otherwise\n */\n ADR_METHOD(bool) isPlaying() = 0;\n\n /**\n * Reset the sample source or buffer to the beginning. On seekable\n * streams, this operation is equivalent to setPosition(0).\n *\n * On some output streams, this operation can be moderately slow, as up to\n * several seconds of PCM buffer must be refilled.\n */\n ADR_METHOD(void) reset() = 0;\n\n /**\n * Set whether the output stream should repeat.\n *\n * @param repeat true if the stream should repeat, false otherwise\n */\n ADR_METHOD(void) setRepeat(bool repeat) = 0;\n\n /**\n * @return true if the stream is repeating\n */\n ADR_METHOD(bool) getRepeat() = 0;\n\n /**\n * Sets the stream's volume.\n *\n * @param volume 0.0 = silence, 1.0 = maximum volume (default)\n */\n ADR_METHOD(void) setVolume(float volume) = 0;\n\n /**\n * Gets the current volume.\n *\n * @return current volume of the output stream\n */\n ADR_METHOD(float) getVolume() = 0;\n\n /**\n * Set current pan.\n *\n * @param pan -1.0 = left, 0.0 = center (default), 1.0 = right\n */\n ADR_METHOD(void) setPan(float pan) = 0;\n\n /**\n * Get current pan.\n */\n ADR_METHOD(float) getPan() = 0;\n\n /**\n * Set current pitch shift.\n *\n * @param shift can range from 0.5 to 2.0. default is 1.0.\n */\n ADR_METHOD(void) setPitchShift(float shift) = 0;\n\n /**\n * Get current pitch shift. Defaults to 1.0.\n */\n ADR_METHOD(float) getPitchShift() = 0;\n\n /**\n * @return true if the stream is seekable, false otherwise\n */\n ADR_METHOD(bool) isSeekable() = 0;\n\n /**\n * @return number of frames in the stream, or 0 if the stream is not\n * seekable\n */\n ADR_METHOD(int) getLength() = 0;\n \n /**\n * Sets the current position within the sample source. 
If the stream\n * is not seekable, this method does nothing.\n *\n * @param position current position in frames\n */\n ADR_METHOD(void) setPosition(int position) = 0;\n\n /**\n * Returns the current position within the sample source.\n *\n * @return current position in frames\n */\n ADR_METHOD(int) getPosition() = 0;\n };\n typedef RefPtr<OutputStream> OutputStreamPtr;\n\n\n /// An integral code representing a specific type of event.\n enum EventType {\n ET_STOP, ///< See StopEvent and StopCallback\n };\n\n\n /// Base interface for event-specific data passed to callbacks.\n class Event : public RefCounted {\n protected:\n ~Event() { }\n\n public:\n /// Returns the EventType code for this event.\n ADR_METHOD(EventType) getType() = 0;\n };\n typedef RefPtr<Event> EventPtr;\n\n\n /**\n * An event object that gets passed to implementations of StopCallback\n * when a stream has stopped playing.\n */\n class StopEvent : public Event {\n protected:\n ~StopEvent() { }\n\n public:\n EventType ADR_CALL getType() { return ET_STOP; }\n\n /// A code representing the reason the stream stopped playback.\n enum Reason {\n STOP_CALLED, ///< stop() was called from an external source.\n STREAM_ENDED, ///< The stream reached its end.\n };\n\n /**\n * @return Pointer to the OutputStream that stopped playback.\n */\n ADR_METHOD(OutputStream*) getOutputStream() = 0;\n\n /**\n * @return Reason for the stop event.\n */\n ADR_METHOD(Reason) getReason() = 0;\n };\n typedef RefPtr<StopEvent> StopEventPtr;\n\n\n /**\n * Base interface for all callbacks. See specific callback implementations\n * for descriptions.\n */ \n class Callback : public RefCounted {\n protected:\n ~Callback() { }\n\n public:\n /**\n * Returns the event type that this callback knows how to handle.\n */\n ADR_METHOD(EventType) getType() = 0;\n\n /**\n * Actually executes the callback with event-specific data. This is\n * only called if event->getType() == this->getType().\n */\n ADR_METHOD(void) call(Event* event) = 0;\n };\n typedef RefPtr<Callback> CallbackPtr;\n\n \n /**\n * To listen for stream stopped events on a device, implement this interface\n * and call registerStopCallback() on the device, passing your\n * implementation. streamStopped() will be called whenever a stream on that\n * device stops playback.\n *\n * WARNING: StopCallback is called from another thread. Make sure your\n * callback is thread-safe.\n */\n class StopCallback : public Callback {\n protected:\n ~StopCallback() { }\n\n public:\n EventType ADR_CALL getType() { return ET_STOP; }\n void ADR_CALL call(Event* event) {\n streamStopped(static_cast<StopEvent*>(event));\n }\n\n /**\n * Called when a stream has stopped.\n *\n * @param event Information pertaining to the event.\n */\n ADR_METHOD(void) streamStopped(StopEvent* event) = 0;\n };\n typedef RefPtr<StopCallback> StopCallbackPtr;\n\n\n /**\n * AudioDevice represents a device on the system which is capable\n * of opening and mixing multiple output streams. In Windows,\n * DirectSound is such a device.\n *\n * This interface is synchronized. update() and openStream() may\n * be called on different threads.\n */\n class AudioDevice : public RefCounted {\n protected:\n ~AudioDevice() { }\n\n public:\n /**\n * Tell the device to do any internal state updates. Some devices\n * update on an internal thread. If that is the case, this method\n * does nothing.\n */\n ADR_METHOD(void) update() = 0;\n\n /**\n * Open an output stream with a given sample source. 
If the sample\n * source ever runs out of data, the output stream automatically stops\n * itself.\n *\n * The output stream takes ownership of the sample source, even if\n * opening the output stream fails (in which case the source is\n * immediately deleted).\n *\n * @param source the source used to feed the output stream with samples\n *\n * @return new output stream if successful, 0 if failure\n */\n ADR_METHOD(OutputStream*) openStream(SampleSource* source) = 0;\n\n /**\n * Open a single buffer with the specified PCM data. This is sometimes\n * more efficient than streaming and works on a larger variety of audio\n * devices. In some implementations, this may download the audio data\n * to the sound card's memory itself.\n *\n * @param samples Buffer containing sample data. openBuffer() does\n * not take ownership of the memory. The application\n * is responsible for freeing it. There must be at\n * least |frame_count * channel_count *\n * GetSampleSize(sample_format)| bytes in the buffer.\n *\n * @param frame_count Number of frames in the buffer.\n *\n * @param channel_count Number of audio channels. 1 = mono, 2 = stereo.\n *\n * @param sample_rate Number of samples per second.\n *\n * @param sample_format Format of samples in buffer.\n *\n * @return new output stream if successful, 0 if failure\n */\n ADR_METHOD(OutputStream*) openBuffer(\n void* samples,\n int frame_count,\n int channel_count,\n int sample_rate,\n SampleFormat sample_format) = 0;\n\n /**\n * Gets the name of the audio device. For example \"directsound\" or \"oss\".\n *\n * @return name of audio device\n */\n ADR_METHOD(const char*) getName() = 0;\n\n /**\n * Registers 'callback' to receive events. Callbacks can be\n * registered multiple times.\n */\n ADR_METHOD(void) registerCallback(Callback* callback) = 0;\n \n /**\n * Unregisters 'callback' once. If it is registered multiple times,\n * each unregisterStopCallback call unregisters one of the instances.\n */\n ADR_METHOD(void) unregisterCallback(Callback* callback) = 0;\n\n /// Clears all of the callbacks from the device.\n ADR_METHOD(void) clearCallbacks() = 0;\n };\n typedef RefPtr<AudioDevice> AudioDevicePtr;\n\n\n /**\n * A readonly sample container which can open sample streams as iterators\n * through the buffer. This is commonly used in cases where a very large\n * sound effect is loaded once into memory and then streamed several times\n * to the audio device. This is more efficient memory-wise than loading\n * the effect multiple times.\n *\n * @see CreateSampleBuffer\n */\n class SampleBuffer : public RefCounted {\n protected:\n ~SampleBuffer() { }\n\n public:\n\n /**\n * Return the format of the sample data in the sample buffer.\n * @see SampleSource::getFormat\n */\n ADR_METHOD(void) getFormat(\n int& channel_count,\n int& sample_rate,\n SampleFormat& sample_format) = 0;\n\n /**\n * Get the length of the sample buffer in frames.\n */\n ADR_METHOD(int) getLength() = 0;\n\n /**\n * Get a readonly pointer to the samples contained within the buffer. The\n * buffer is |channel_count * frame_count * GetSampleSize(sample_format)|\n * bytes long.\n */\n virtual const void* ADR_CALL getSamples() = 0;\n\n /**\n * Open a seekable sample source using the samples contained in the\n * buffer.\n */\n ADR_METHOD(SampleSource*) openStream() = 0;\n };\n typedef RefPtr<SampleBuffer> SampleBufferPtr;\n\n\n /**\n * Defines the type of SoundEffect objects. 
@see SoundEffect\n */\n enum SoundEffectType {\n SINGLE,\n MULTIPLE,\n };\n\n\n /**\n * SoundEffect is a convenience class which provides a simple\n * mechanism for basic sound playback. There are two types of sound\n * effects: SINGLE and MULTIPLE. SINGLE sound effects only allow\n * the sound to be played once at a time. MULTIPLE sound effects\n * always open a new stream to the audio device for each time it is\n * played (cleaning up or reusing old streams if possible).\n */\n class SoundEffect : public RefCounted {\n protected:\n ~SoundEffect() { }\n\n public:\n /**\n * Trigger playback of the sound. If the SoundEffect is of type\n * SINGLE, this plays the sound if it isn't playing yet, and\n * starts it again if it is. If the SoundEffect is of type\n * MULTIPLE, play() simply starts playing the sound again.\n */\n ADR_METHOD(void) play() = 0;\n\n /**\n * If the sound is of type SINGLE, stop the sound. If it is of\n * type MULTIPLE, stop all playing instances of the sound.\n */\n ADR_METHOD(void) stop() = 0;\n\n /**\n * Sets the sound's volume.\n *\n * @param volume 0.0 = silence, 1.0 = maximum volume (default)\n */\n ADR_METHOD(void) setVolume(float volume) = 0;\n\n /**\n * Gets the current volume.\n *\n * @return current volume of the output stream\n */\n ADR_METHOD(float) getVolume() = 0;\n\n /**\n * Set current pan.\n *\n * @param pan -1.0 = left, 0.0 = center (default), 1.0 = right\n */\n ADR_METHOD(void) setPan(float pan) = 0;\n\n /**\n * Get current pan.\n */\n ADR_METHOD(float) getPan() = 0;\n\n /**\n * Set current pitch shift.\n *\n * @param shift can range from 0.5 to 2.0. default is 1.0.\n */\n ADR_METHOD(void) setPitchShift(float shift) = 0;\n\n /**\n * Get current pitch shift. Defaults to 1.0.\n */\n ADR_METHOD(float) getPitchShift() = 0;\n };\n typedef RefPtr<SoundEffect> SoundEffectPtr;\n\n\n /**\n * Represents a device capable of playing CD audio. Internally, this\n * uses the MCI subsystem in windows and libcdaudio on other platforms.\n * MCI subsystem: http://msdn.microsoft.com/library/default.asp?url=/library/en-us/multimed/htm/_win32_multimedia_command_strings.asp\n * libcdaudio: http://cdcd.undergrid.net/libcdaudio/\n */\n class CDDevice : public RefCounted {\n protected:\n virtual ~CDDevice() { }\n\n public:\n /**\n * Returns the name of this CD Device, often just the device name\n * it was created with.\n */\n virtual const char* ADR_CALL getName() = 0;\n\n /**\n * Returns the number of audio tracks on the disc.\n */\n ADR_METHOD(int) getTrackCount() = 0;\n\n /**\n * Starts playback of the given track. If another track was\n * already playing, the previous track is stopped. IMPORTANT: Tracks are\n * indexed from 0 to getTrackCount() - 1.\n */\n ADR_METHOD(void) play(int track) = 0;\n\n /**\n * Stops the playback, if the playback was already stopped, this\n * does nothing.\n */\n ADR_METHOD(void) stop() = 0;\n \n /**\n * pauses playback of the track that is currently playing (if any)\n * This does nothing if no track is playing\n */\n ADR_METHOD(void) pause() = 0;\n\n /**\n * Resumes playback of the track that is currently paused (if any).\n * This does nothing if no track is paused.\n */\n ADR_METHOD(void) resume() = 0;\n\n /**\n * Returns true if the CD is currently playing a sound, this could\n * be through us, or through some other program.\n */\n ADR_METHOD(bool) isPlaying() = 0;\n\n /**\n * Returns true if the drive contains a cd. 
This might be slow\n * on some systems, use with care.\n */\n ADR_METHOD(bool) containsCD() = 0;\n\n /// Returns true if the door is open.\n ADR_METHOD(bool) isDoorOpen() = 0;\n\n /// Opens this device's door.\n ADR_METHOD(void) openDoor() = 0;\n\n /// Closes this device's door.\n ADR_METHOD(void) closeDoor() = 0;\n };\n typedef RefPtr<CDDevice> CDDevicePtr;\n\n\n /**\n * An opened MIDI song that can be played, stopped, and seeked within.\n * Uses MCI under Windows and is not supported in other platforms.\n */\n class MIDIStream : public RefCounted {\n protected:\n virtual ~MIDIStream() { }\n\n public:\n /**\n * Begins playback of the song and does nothing if the song is already\n * playing.\n */\n ADR_METHOD(void) play() = 0;\n\n /// Stops playback of the song and seeks to the beginning.\n ADR_METHOD(void) stop() = 0;\n\n /**\n * Stops playback of the song and does not change its current position.\n * A subsequent play() will resume the song where it left off.\n */\n ADR_METHOD(void) pause() = 0;\n\n /// Returns true if the song is currently playing, false otherwise.\n ADR_METHOD(bool) isPlaying() = 0;\n\n /// Returns the length of the song in milliseconds.\n ADR_METHOD(int) getLength() = 0;\n\n /// Returns the current position of the song in milliseconds.\n ADR_METHOD(int) getPosition() = 0;\n\n /// Sets the current position of the song.\n ADR_METHOD(void) setPosition(int position) = 0;\n\n /// Returns true if this song is set to repeat.\n ADR_METHOD(bool) getRepeat() = 0;\n\n /// Sets whether the song should repeat on completion. Defaults to false.\n ADR_METHOD(void) setRepeat(bool repeat) = 0;\n };\n typedef RefPtr<MIDIStream> MIDIStreamPtr;\n\n\n /**\n * A MIDIDevice must be instantiated in order to open MIDIStreams.\n */\n class MIDIDevice : public RefCounted {\n protected:\n virtual ~MIDIDevice() { }\n\n public:\n /**\n * Returns the name of the device.\n */\n ADR_METHOD(const char*) getName() = 0;\n\n /**\n * openStream() creates and returns a new MIDIStream object from the\n * file with the specified name, which then can be queried and played.\n * This method returns NULL if the stream cannot be opened.\n *\n * Note: MCI subsystem limitations do not allow loading MIDIStream\n * objects from an audiere File implementation. This may be addressed\n * in future versions of this API.\n */\n ADR_METHOD(MIDIStream*) openStream(const char* filename) = 0;\n };\n typedef RefPtr<MIDIDevice> MIDIDevicePtr;\n\n\n /// PRIVATE API - for internal use only\n namespace hidden {\n\n // these are extern \"C\" so we don't mangle the names\n\n ADR_FUNCTION(const char*) AdrGetVersion();\n\n /**\n * Returns a formatted string that lists the file formats that Audiere\n * supports. This function is DLL-safe.\n *\n * It is formatted in the following way:\n *\n * description1:ext1,ext2,ext3;description2:ext1,ext2,ext3\n */\n ADR_FUNCTION(const char*) AdrGetSupportedFileFormats();\n\n /**\n * Returns a formatted string that lists the audio devices Audiere\n * supports. 
This function is DLL-safe.\n *\n * It is formatted in the following way:\n *\n * name1:description1;name2:description2;...\n */\n ADR_FUNCTION(const char*) AdrGetSupportedAudioDevices();\n\n ADR_FUNCTION(int) AdrGetSampleSize(SampleFormat format);\n\n ADR_FUNCTION(AudioDevice*) AdrOpenDevice(\n const char* name,\n const char* parameters);\n\n ADR_FUNCTION(SampleSource*) AdrOpenSampleSource(\n const char* filename,\n FileFormat file_format);\n ADR_FUNCTION(SampleSource*) AdrOpenSampleSourceFromFile(\n File* file,\n FileFormat file_format);\n ADR_FUNCTION(SampleSource*) AdrCreateTone(double frequency);\n ADR_FUNCTION(SampleSource*) AdrCreateSquareWave(double frequency);\n ADR_FUNCTION(SampleSource*) AdrCreateWhiteNoise();\n ADR_FUNCTION(SampleSource*) AdrCreatePinkNoise();\n\n ADR_FUNCTION(LoopPointSource*) AdrCreateLoopPointSource(\n SampleSource* source);\n\n ADR_FUNCTION(OutputStream*) AdrOpenSound(\n AudioDevice* device,\n SampleSource* source,\n bool streaming);\n\n ADR_FUNCTION(SampleBuffer*) AdrCreateSampleBuffer(\n void* samples,\n int frame_count,\n int channel_count,\n int sample_rate,\n SampleFormat sample_format);\n ADR_FUNCTION(SampleBuffer*) AdrCreateSampleBufferFromSource(\n SampleSource* source);\n\n ADR_FUNCTION(SoundEffect*) AdrOpenSoundEffect(\n AudioDevice* device,\n SampleSource* source,\n SoundEffectType type);\n\n ADR_FUNCTION(File*) AdrOpenFile(\n const char* name,\n bool writeable);\n\n ADR_FUNCTION(File*) AdrCreateMemoryFile(\n const void* buffer,\n int size);\n\n ADR_FUNCTION(const char*) AdrEnumerateCDDevices();\n\n ADR_FUNCTION(CDDevice*) AdrOpenCDDevice(\n const char* name); // Parameters?\n\n ADR_FUNCTION(MIDIDevice*) AdrOpenMIDIDevice(\n const char* name); // Parameters?\n }\n\n\n\n\n /*-------- PUBLIC API FUNCTIONS --------*/\n\n\n /**\n * Returns the Audiere version string.\n *\n * @return Audiere version information\n */\n inline const char* GetVersion() {\n return hidden::AdrGetVersion();\n }\n\n\n inline void SplitString(\n std::vector<std::string>& out,\n const char* in,\n char delim)\n {\n out.clear();\n while (*in) {\n const char* next = strchr(in, delim);\n if (next) {\n out.push_back(std::string(in, next));\n } else {\n out.push_back(in);\n }\n\n in = (next ? next + 1 : \"\");\n }\n }\n\n\n /// Describes a file format that Audiere supports.\n struct FileFormatDesc {\n /// Short description of format, such as \"MP3 Files\" or \"Mod Files\"\n std::string description;\n\n /// List of support extensions, such as {\"mod\", \"it\", \"xm\"}\n std::vector<std::string> extensions;\n };\n\n /// Populates a vector of FileFormatDesc structs.\n inline void GetSupportedFileFormats(std::vector<FileFormatDesc>& formats) {\n std::vector<std::string> descriptions;\n SplitString(descriptions, hidden::AdrGetSupportedFileFormats(), ';');\n\n formats.resize(descriptions.size());\n for (unsigned i = 0; i < descriptions.size(); ++i) {\n const char* d = descriptions[i].c_str();\n const char* colon = strchr(d, ':');\n formats[i].description.assign(d, colon);\n\n SplitString(formats[i].extensions, colon + 1, ',');\n }\n }\n\n\n /// Describes a supported audio device.\n struct AudioDeviceDesc {\n /// Name of device, i.e. 
\"directsound\", \"winmm\", or \"oss\"\n std::string name;\n\n // Textual description of device.\n std::string description;\n };\n\n /// Populates a vector of AudioDeviceDesc structs.\n inline void GetSupportedAudioDevices(std::vector<AudioDeviceDesc>& devices) {\n std::vector<std::string> descriptions;\n SplitString(descriptions, hidden::AdrGetSupportedAudioDevices(), ';');\n\n devices.resize(descriptions.size());\n for (unsigned i = 0; i < descriptions.size(); ++i) {\n std::vector<std::string> d;\n SplitString(d, descriptions[i].c_str(), ':');\n devices[i].name = d[0];\n devices[i].description = d[1];\n }\n }\n\n\n /**\n * Get the size of a sample in a specific sample format.\n * This is commonly used to determine how many bytes a chunk of\n * PCM data will take.\n *\n * @return Number of bytes a single sample in the specified format\n * takes.\n */\n inline int GetSampleSize(SampleFormat format) {\n return hidden::AdrGetSampleSize(format);\n }\n\n /**\n * Open a new audio device. If name or parameters are not specified,\n * defaults are used. Each platform has its own set of audio devices.\n * Every platform supports the \"null\" audio device.\n *\n * @param name name of audio device that should be used\n * @param parameters comma delimited list of audio-device parameters;\n * for example, \"buffer=100,rate=44100\"\n *\n * @return new audio device object if OpenDevice succeeds, and 0 in case\n * of failure\n */\n inline AudioDevice* OpenDevice(\n const char* name = 0,\n const char* parameters = 0)\n {\n return hidden::AdrOpenDevice(name, parameters);\n }\n\n /**\n * Create a streaming sample source from a sound file. This factory simply\n * opens a default file from the system filesystem and calls\n * OpenSampleSource(File*).\n *\n * @see OpenSampleSource(File*)\n */\n inline SampleSource* OpenSampleSource(\n const char* filename,\n FileFormat file_format = FF_AUTODETECT)\n {\n return hidden::AdrOpenSampleSource(filename, file_format);\n }\n\n /**\n * Opens a sample source from the specified file object. If the sound file\n * cannot be opened, this factory function returns 0.\n *\n * @note Some sound files support seeking, while some don't.\n *\n * @param file File object from which to open the decoder\n * @param file_format Format of the file to load. If FF_AUTODETECT,\n * Audiere will try opening the file in each format.\n *\n * @return new SampleSource if OpenSampleSource succeeds, 0 otherwise\n */\n inline SampleSource* OpenSampleSource(\n const FilePtr& file,\n FileFormat file_format = FF_AUTODETECT)\n {\n return hidden::AdrOpenSampleSourceFromFile(file.get(), file_format);\n }\n\n /**\n * Create a tone sample source with the specified frequency.\n *\n * @param frequency Frequency of the tone in Hz.\n *\n * @return tone sample source\n */\n inline SampleSource* CreateTone(double frequency) {\n return hidden::AdrCreateTone(frequency);\n }\n\n /**\n * Create a square wave with the specified frequency.\n *\n * @param frequency Frequency of the wave in Hz.\n *\n * @return wave sample source\n */\n inline SampleSource* CreateSquareWave(double frequency) {\n return hidden::AdrCreateSquareWave(frequency);\n }\n\n /**\n * Create a white noise sample source. White noise is just random\n * data.\n *\n * @return white noise sample source\n */\n inline SampleSource* CreateWhiteNoise() {\n return hidden::AdrCreateWhiteNoise();\n }\n\n /**\n * Create a pink noise sample source. 
Pink noise is noise with equal\n * power distribution among octaves (logarithmic), not frequencies.\n *\n * @return pink noise sample source\n */\n inline SampleSource* CreatePinkNoise() {\n return hidden::AdrCreatePinkNoise();\n }\n\n /**\n * Create a LoopPointSource from a SampleSource. The SampleSource must\n * be seekable. If it isn't, or the source isn't valid, this function\n * returns 0.\n */\n inline LoopPointSource* CreateLoopPointSource(\n const SampleSourcePtr& source)\n {\n return hidden::AdrCreateLoopPointSource(source.get());\n }\n\n /**\n * Creates a LoopPointSource from a source loaded from a file.\n */\n inline LoopPointSource* CreateLoopPointSource(\n const char* filename,\n FileFormat file_format = FF_AUTODETECT)\n {\n return CreateLoopPointSource(OpenSampleSource(filename, file_format));\n }\n\n /**\n * Creates a LoopPointSource from a source loaded from a file.\n */\n inline LoopPointSource* CreateLoopPointSource(\n const FilePtr& file,\n FileFormat file_format = FF_AUTODETECT)\n {\n return CreateLoopPointSource(OpenSampleSource(file, file_format));\n }\n\n /**\n * Try to open a sound buffer using the specified AudioDevice and\n * sample source. If the specified sample source is seekable, it\n * loads it into memory and uses AudioDevice::openBuffer to create\n * the output stream. If the stream is not seekable, it uses\n * AudioDevice::openStream to create the output stream. This means\n * that certain file types must always be streamed, and therefore,\n * OpenSound will hold on to the file object. If you must guarantee\n * that the file on disk is no longer referenced, you must create\n * your own memory file implementation and load your data into that\n * before calling OpenSound.\n *\n * @param device AudioDevice in which to open the output stream.\n *\n * @param source SampleSource used to generate samples for the sound\n * object. OpenSound takes ownership of source, even\n * if it returns 0. (In that case, OpenSound immediately\n * deletes the SampleSource.)\n *\n * @param streaming If false or unspecified, OpenSound attempts to\n * open the entire sound into memory. Otherwise, it\n * streams the sound from the file.\n *\n * @return new output stream if successful, 0 otherwise\n */\n inline OutputStream* OpenSound(\n const AudioDevicePtr& device,\n const SampleSourcePtr& source,\n bool streaming = false)\n {\n return hidden::AdrOpenSound(device.get(), source.get(), streaming);\n }\n\n /**\n * Calls OpenSound(AudioDevice*, SampleSource*) with a sample source\n * created via OpenSampleSource(const char*).\n */\n inline OutputStream* OpenSound(\n const AudioDevicePtr& device,\n const char* filename,\n bool streaming = false,\n FileFormat file_format = FF_AUTODETECT)\n {\n SampleSource* source = OpenSampleSource(filename, file_format);\n return OpenSound(device, source, streaming);\n }\n\n /**\n * Calls OpenSound(AudioDevice*, SampleSource*) with a sample source\n * created via OpenSampleSource(File* file).\n */\n inline OutputStream* OpenSound(\n const AudioDevicePtr& device,\n const FilePtr& file,\n bool streaming = false,\n FileFormat file_format = FF_AUTODETECT)\n {\n SampleSource* source = OpenSampleSource(file, file_format);\n return OpenSound(device, source, streaming);\n }\n\n /**\n * Create a SampleBuffer object using the specified samples and formats.\n *\n * @param samples Pointer to a buffer of samples used to initialize the\n * new object. 
If this is 0, the sample buffer contains\n * just silence.\n *\n * @param frame_count Size of the sample buffer in frames.\n *\n * @param channel_count Number of channels in each frame.\n *\n * @param sample_rate Sample rate in Hz.\n *\n * @param sample_format Format of each sample. @see SampleFormat.\n *\n * @return new SampleBuffer object\n */\n inline SampleBuffer* CreateSampleBuffer(\n void* samples,\n int frame_count,\n int channel_count,\n int sample_rate,\n SampleFormat sample_format)\n {\n return hidden::AdrCreateSampleBuffer(\n samples, frame_count,\n channel_count, sample_rate, sample_format);\n }\n\n /**\n * Create a SampleBuffer object from a SampleSource.\n *\n * @param source Seekable sample source used to create the buffer.\n * If the source is not seekable, then the function\n * fails.\n *\n * @return new sample buffer if success, 0 otherwise\n */\n inline SampleBuffer* CreateSampleBuffer(const SampleSourcePtr& source) {\n return hidden::AdrCreateSampleBufferFromSource(source.get());\n }\n\n /**\n * Open a SoundEffect object from the given sample source and sound\n * effect type. @see SoundEffect\n *\n * @param device AudioDevice on which the sound is played.\n *\n * @param source The sample source used to feed the sound effect\n * with data.\n *\n * @param type The type of the sound effect. If type is MULTIPLE,\n * the source must be seekable.\n *\n * @return new SoundEffect object if successful, 0 otherwise\n */\n inline SoundEffect* OpenSoundEffect(\n const AudioDevicePtr& device,\n const SampleSourcePtr& source,\n SoundEffectType type)\n {\n return hidden::AdrOpenSoundEffect(device.get(), source.get(), type);\n }\n\n /**\n * Calls OpenSoundEffect(AudioDevice*, SampleSource*,\n * SoundEffectType) with a sample source created from the filename.\n */\n inline SoundEffect* OpenSoundEffect(\n const AudioDevicePtr& device,\n const char* filename,\n SoundEffectType type,\n FileFormat file_format = FF_AUTODETECT)\n {\n SampleSource* source = OpenSampleSource(filename, file_format);\n return OpenSoundEffect(device, source, type);\n }\n\n /**\n * Calls OpenSoundEffect(AudioDevice*, SampleSource*,\n * SoundEffectType) with a sample source created from the file.\n */\n inline SoundEffect* OpenSoundEffect(\n const AudioDevicePtr& device,\n const FilePtr& file,\n SoundEffectType type,\n FileFormat file_format = FF_AUTODETECT)\n {\n SampleSource* source = OpenSampleSource(file, file_format);\n return OpenSoundEffect(device, source, type);\n }\n\n /**\n * Opens a default file implementation from the local filesystem.\n *\n * @param filename The name of the file on the local filesystem.\n * @param writeable Whether the writing to the file is allowed.\n */\n inline File* OpenFile(const char* filename, bool writeable) {\n return hidden::AdrOpenFile(filename, writeable);\n }\n\n /**\n * Creates a File implementation that reads from a buffer in memory.\n * It stores a copy of the buffer that is passed in.\n *\n * The File object does <i>not</i> take ownership of the memory buffer.\n * When the file is destroyed, it will not free the memory.\n *\n * @param buffer Pointer to the beginning of the data.\n * @param size Size of the buffer in bytes.\n *\n * @return 0 if size is non-zero and buffer is null. 
Otherwise,\n * returns a valid File object.\n */\n inline File* CreateMemoryFile(const void* buffer, int size) {\n return hidden::AdrCreateMemoryFile(buffer, size);\n }\n\n /**\n * Generates a list of available CD device names.\n *\n * @param devices A vector of strings to be filled.\n */\n inline void EnumerateCDDevices(std::vector<std::string>& devices) {\n const char* d = hidden::AdrEnumerateCDDevices();\n while (d && *d) {\n devices.push_back(d);\n d += strlen(d) + 1;\n }\n }\n\n /**\n * Opens the specified CD playback device.\n * \n * @param device The filesystem device to be played.\n * e.g. Linux: \"/dev/cdrom\", Windows: \"D:\"\n *\n * @return 0 if opening device failed, valid CDDrive object otherwise.\n */\n inline CDDevice* OpenCDDevice(const char* device) {\n return hidden::AdrOpenCDDevice(device);\n }\n\n /**\n * Opens the specified MIDI synthesizer device.\n *\n * @param device The name of the device. Unused for now.\n *\n * @return 0 if opening device failed, valid MIDIDevice object otherwise.\n */\n inline MIDIDevice* OpenMIDIDevice(const char* device) {\n return hidden::AdrOpenMIDIDevice(device);\n }\n\n}\n\n\n#endif\n" }, { "alpha_fraction": 0.6909436583518982, "alphanum_fraction": 0.6998100280761719, "avg_line_length": 33.326087951660156, "blob_id": "693b77254e0f6fb519b8e7708204544b216a3496", "content_id": "27b14f6bcbd846eb238a748eaad9958938490c78", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1579, "license_type": "permissive", "max_line_length": 114, "num_lines": 46, "path": "/Sources/Core/Graphics/Shader.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <string>\n#include <glm/fwd.hpp>\n\n#include \"../ResourcesFWD.hpp\"\n\nnamespace Core::Graphics\n{\n\n class Shader\n {\n public:\n Shader() noexcept = default;\n Shader(Resources::VertexShaderSPtr vertex_shader, Resources::FragmentShaderSPtr fragment_shader) noexcept;\n void use() const noexcept;\n bool isValid() const noexcept;\n void setUniformi(const char* name, int value) const noexcept;\n void setUniformf(const char* name, float value) const noexcept;\n void setUniformVector3f(const char* name, const glm::vec3& vector) const noexcept;\n void setUniformMatrix4x4f(const char* name, const glm::mat4& matrix) const noexcept;\n ~Shader();\n private:\n unsigned int compileShader(const std::string& shader_data, int shader_type) noexcept;\n void linkShaders(unsigned int vertex_shader, unsigned int fragment_shader) noexcept;\n private: \n unsigned int m_program_id = 0;\n };\n\n}\n" }, { "alpha_fraction": 0.7342657446861267, "alphanum_fraction": 0.7412587404251099, "avg_line_length": 29.105262756347656, "blob_id": "a5d731c25304b83251fc87b25fbae895e0a5e6e0", "content_id": "7282065e180da09021f730bdc1c0e8d715f52e29", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, 
"language": "C++", "length_bytes": 1144, "license_type": "permissive", "max_line_length": 93, "num_lines": 38, "path": "/Sources/Core/Resources/Sound.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"Sound.hpp\"\n\n#include <boost/filesystem/convenience.hpp>\n\n#include \"Helpers/Debug.hpp\"\n#include \"Loaders/SoundLoader.hpp\"\n\naudiere::OutputStreamPtr Core::Resources::Sound::getAudioStream() const noexcept\n{\n return m_audio_stream;\n}\n\nbool Core::Resources::Sound::load(std::string_view sound_path) noexcept\n{\n return Loaders::SoundLoader::load(*this, sound_path, false);\n}\n\nvoid Core::Resources::Sound::setAudioStream(audiere::OutputStreamPtr&& audio_stream) noexcept\n{\n m_audio_stream = audio_stream;\n}\n" }, { "alpha_fraction": 0.6492042541503906, "alphanum_fraction": 0.6564986705780029, "avg_line_length": 38.68421173095703, "blob_id": "64ee35ef45065c7242517571099107f366257b99", "content_id": "49a0b276eb3547ffb9eea1ce52aaecc0cbbaa5fe", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3016, "license_type": "permissive", "max_line_length": 77, "num_lines": 76, "path": "/Sources/Core/Resources/Map.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include \"Aliases.hpp\"\n#include \"IResource.hpp\"\n#include \"../Helpers/Holders/Polymorphic.hpp\"\n\nnamespace Core::Resources\n{\n\n class Map : public IResource, public Helpers::Holders::Polymorphic<Map>\n {\n public:\n class MapObject\n {\n public:\n void setScale(float scale) noexcept;\n void setXRotation(float rotation) noexcept;\n void setYRotation(float rotation) noexcept;\n void setZRotation(float rotation) noexcept;\n void setPosition(const bg::point3f_t& position) noexcept;\n float getScale() const noexcept;\n float getXRotation() const noexcept;\n float getYRotation() const noexcept;\n float getZRotation() const noexcept;\n const bg::point3f_t& getPosition() const noexcept;\n private:\n bg::point3f_t m_position{};\n float m_scale = 1.0f;\n float m_x_rotation = 1.0f;\n float m_y_rotation = 1.0f;\n float m_z_rotation = 1.0f;\n };\n public:\n void setDimensions(const bg::box2f_t& dimensions) noexcept;\n void 
setTreeModelsName(const std::string& model_name);\n void setHouseModelsName(const std::string& model_name);\n void setGroundModelsName(const std::string& model_name);\n void setTreeObjects(std::vector<MapObject>&& map_objects) noexcept;\n void setHouseObjects(std::vector<MapObject>&& map_objects) noexcept;\n void setGroundObjects(std::vector<MapObject>&& map_objects) noexcept;\n const bg::box2f_t& getDimensions() const noexcept;\n std::string_view getTreeModelsName() const noexcept;\n std::string_view getHouseModelsName() const noexcept;\n std::string_view getGroundModelsName() const noexcept;\n const std::vector<MapObject>& getTreeObjects() const noexcept;\n const std::vector<MapObject>& getHouseObjects() const noexcept;\n const std::vector<MapObject>& getGroundObjects() const noexcept;\n public:\n bool load(std::string_view map_path) noexcept override;\n private:\n std::string m_tree_models_name{};\n std::string m_house_models_name{};\n std::string m_ground_models_name{};\n std::vector<MapObject> m_tree_objects{};\n std::vector<MapObject> m_house_objects{};\n std::vector<MapObject> m_ground_objects{};\n bg::box2f_t m_map_dimensions{};\n };\n\n}\n" }, { "alpha_fraction": 0.674481987953186, "alphanum_fraction": 0.6962922811508179, "avg_line_length": 29.065574645996094, "blob_id": "79f8f2f6bdd04889aaab39275e40775656b9034a", "content_id": "5b4719d2e502ef3bf0f805a14ec751003e5e541a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1834, "license_type": "permissive", "max_line_length": 92, "num_lines": 61, "path": "/Sources/Aliases.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <boost/geometry.hpp>\n#include <boost/filesystem.hpp>\n#include <boost/polygon/polygon.hpp>\n#include <boost/property_tree/ptree.hpp>\n#include <boost/property_tree/xml_parser.hpp>\n\nnamespace boost::geometry\n{\n\n template<typename Type>\n using point2_t = boost::geometry::model::point<Type, 2, boost::geometry::cs::cartesian>;\n\n using point2i_t = point2_t<int>;\n using point2f_t = point2_t<float>;\n using point2d_dt = point2_t<double>;\n\n template<typename Type>\n using point3_t = boost::geometry::model::point<Type, 3, boost::geometry::cs::cartesian>;\n\n using point3i_t = point3_t<int>;\n using point3f_t = point3_t<float>;\n using point3d_t = point3_t<double>;\n\n template<typename Type>\n using box2_t = boost::geometry::model::box<point2_t<Type>>;\n\n using box2i_t = box2_t<int>;\n using box2f_t = box2_t<float>;\n using box2d_t = box2_t<double>;\n\n template<typename Type>\n using box3_t = boost::geometry::model::box<point3_t<Type>>;\n\n using box3i_t = box3_t<int>;\n using box3f_t = box3_t<float>;\n using box3d_t = box3_t<double>;\n\n}\n\nnamespace bp = boost::polygon;\nnamespace bg = boost::geometry;\nnamespace bf = boost::filesystem;\nnamespace bpt = boost::property_tree;\n" }, { 
"alpha_fraction": 0.6767787933349609, "alphanum_fraction": 0.6872586607933044, "avg_line_length": 32.574073791503906, "blob_id": "0fc52a2dd8f2a04059899dc3671822c919b6d883", "content_id": "c1ad3ef5979dce5f3bda685d9ffefebd7d6ce88a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1813, "license_type": "permissive", "max_line_length": 75, "num_lines": 54, "path": "/Sources/Core/Input/MouseState.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\n#include <atomic>\n#include <GLFW/glfw3.h>\n\n#include \"../Helpers/Holders/Singleton.hpp\"\n\n#ifndef g_mouse_state\n #define g_mouse_state Core::Input::MouseState::getInstance()\n#endif // g_mouse_state\n\nnamespace Core::Input \n{\n\n class MouseState : public Helpers::Holders::Singleton<MouseState>\n {\n friend void onMouseMoveEvent(GLFWwindow*, double, double) noexcept;\n public:\n bool isPressedLeftButton() const noexcept;\n bool isPressedRightButton() const noexcept;\n int getAndUnsetXDisplacementOffset() noexcept;\n int getAndUnsetYDisplacementOffset() noexcept;\n private:\n void pressLeftButton() noexcept;\n void pressRightButton() noexcept;\n void releaseLeftButton() noexcept;\n void releaseRightButton() noexcept;\n void setPosition(int x_pos, int y_pos) noexcept;\n private:\n std::atomic_int m_x_position = 0;\n std::atomic_int m_y_position = 0;\n std::atomic_int m_last_x_position = 430; // window_width / 2\n std::atomic_int m_last_y_position = 300; // window_height / 2\n std::atomic_bool m_is_pressed_left_button = false;\n std::atomic_bool m_is_pressed_right_button = false;\n };\n\n}\n" }, { "alpha_fraction": 0.7180895805358887, "alphanum_fraction": 0.721470832824707, "avg_line_length": 39.1016960144043, "blob_id": "7f71efa9a88cf9400471683fddc806d3d70d5294", "content_id": "7004b800249fdd513878fee70b3c275a7a68938f", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2366, "license_type": "permissive", "max_line_length": 115, "num_lines": 59, "path": "/Sources/Core/Managers/ResourceManager.cpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#include \"PrecompiledHeader.hpp\"\n#include \"ResourceManager.hpp\"\n\n#include 
<boost/property_tree/ptree.hpp>\n#include <boost/property_tree/ini_parser.hpp>\n\n#include \"ConfigurationManager.hpp\"\n#include \"../Resources.hpp\"\n#include \"../Helpers/Time.hpp\"\n#include \"../Helpers/Debug.hpp\"\n\nvoid Core::Managers::ResourceManager::initialize()\n{\n#ifdef _DEBUG\n const auto start_time = Helpers::getCurrentTimeInMilliseconds<double>();\n#endif // _DEBUG\n\n const std::string resources_path = STR(g_configuration_manager.getResourcesPath());\n const std::string resources_config_filename = STR(g_configuration_manager.getResourcesConfigurationFilename());\n const std::string resources_config_file_full_path = resources_path + resources_config_filename;\n\n try\n {\n boost::property_tree::ptree ini_configuration{};\n read_ini(resources_config_file_full_path, ini_configuration);\n loadSection<Resources::Map>(ini_configuration.get_child(\"Maps\"));\n loadSection<Resources::Model>(ini_configuration.get_child(\"Models\"));\n loadSection<Resources::Sound>(ini_configuration.get_child(\"Sounds\"));\n loadSection<Resources::Image>(ini_configuration.get_child(\"Textures\"));\n loadSection<Resources::VertexShader>(ini_configuration.get_child(\"VertexShaders\"));\n loadSection<Resources::FragmentShader>(ini_configuration.get_child(\"FragmentShaders\"));\n }\n catch (const boost::property_tree::ini_parser_error&)\n {\n LOG_ERROR(\"'ResourceManager' was not initialized.\");\n }\n\n#ifdef _DEBUG\n const auto end_time = Helpers::getCurrentTimeInMilliseconds<double>();\n const auto loading_time = end_time - start_time;\n LOG_PROFILING(\"'ResourceManager' was initialized in \" + TO_STR(loading_time) + \"ms.\");\n#endif // _DEBUG\n}\n" }, { "alpha_fraction": 0.7343867421150208, "alphanum_fraction": 0.740406334400177, "avg_line_length": 27.89130401611328, "blob_id": "6da5cf19a4d5cfd8852387e77a05e252eeb19753", "content_id": "51e651c344c9119c7507854104d5cf09f481b492", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1329, "license_type": "permissive", "max_line_length": 87, "num_lines": 46, "path": "/Sources/Core/ManagersFWD.hpp", "repo_name": "VladimirBalun/RacingWorld", "src_encoding": "UTF-8", "text": "/*\n * Copyright 2018 Vladimir Balun\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n\n#pragma once\n\nnamespace Core::Managers \n{\n\n template<class T>\n struct IManager;\n\n class SoundManager;\n class LocaleManager;\n class ResourceManager;\n class ConfigurationManager;\n\n}\n\n#ifndef g_sound_manager\n #define g_sound_manager Core::Managers::SoundManager::getInstance()\n#endif // g_sound_manager\n\n#ifndef g_locale_manager\n #define g_locale_manager Core::Managers::LocaleManager::getInstance()\n#endif // g_locale_manager\n\n#ifndef g_resource_manager\n #define g_resource_manager Core::Managers::ResourceManager::getInstance()\n#endif // g_resource_manager\n\n#ifndef g_configuration_manager\n #define g_configuration_manager Core::Managers::ConfigurationManager::getInstance()\n#endif // 
g_configuration_manager\n" } ]
84
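The audiere.h entry in the record above spells out the library's public playback API in its doc comments (RefPtr reference counting, OpenDevice, OpenSound, OutputStream). A minimal C++ sketch of that flow, built only from declarations shown in that header; the filename "music.ogg", the volume value, and the polling loop are illustrative assumptions rather than anything the record states:

```cpp
// Minimal playback sketch against the Audiere API declared in audiere.h above.
// "music.ogg" is a hypothetical file; real code would report errors and avoid
// busy-waiting while the stream plays.
#include "audiere.h"

int main() {
    // Open the platform's default audio device (null name/parameters = defaults).
    audiere::AudioDevicePtr device(audiere::OpenDevice());
    if (!device)
        return 1; // no usable audio device

    // Stream the file from disk instead of decoding it fully into memory.
    audiere::OutputStreamPtr stream(audiere::OpenSound(device, "music.ogg", true));
    if (!stream)
        return 1; // file missing or format unsupported

    stream->setVolume(0.5f); // 0.0 = silence, 1.0 = maximum volume
    stream->play();

    // Devices that mix on an internal thread make update() a no-op; on the
    // others it must be pumped while audio is playing.
    while (stream->isPlaying())
        device->update();

    return 0;
}
```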
kanepa/helloflask
https://github.com/kanepa/helloflask
9d97adda53718d04c585758a3e8cb3bc349f238a
e27f9eb5def251638888634beddad30eff10c87c
b6262be30b21fb8e616af509c5b1747e0841fbb4
refs/heads/master
2021-01-10T23:28:24.090780
2016-10-12T13:25:41
2016-10-12T13:25:41
70,582,434
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5799689292907715, "avg_line_length": 16.88888931274414, "blob_id": "717ac051ed51ee4d8892a58190f39a5f7c8a666f", "content_id": "6359837c5ec76a982494de99db498de574850c0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1288, "license_type": "no_license", "max_line_length": 88, "num_lines": 72, "path": "/helloflask.py", "repo_name": "kanepa/helloflask", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\n\n\nalbums =[{\n 'artist': 'Madonna',\n 'title' : 'True Blue',\n 'year' : '1987'\n}]\n\ngenres =['rock', 'blues', 'pop']\n\nsuper = ['batman', 'superman', 'spiderman']\n\nuser = ['ironman', 'antman', 'robin']\n\nages = {\n 'bob': '43',\n 'alice': '29'\n\n}\n\n\nartists = [{\n\n 'id': '1',\n 'name': 'Gandolf',\n 'title': 'The lord of the Rings'\n },\n { 'id':'2',\n 'name': 'Patterson',\n 'tile': 'walk alone'\n },\n {\n 'id':'3',\n ''\n ''\n 'name': 'Fletcher',\n 'title': 'West Brom'\n}]\n\[email protected]('/')\ndef hello_world():\n return 'Hello World!'\n\[email protected]('/albums')\ndef list_albums():\n return render_template('helloflask.html', albums=albums, genres=genres, super=super)\n\[email protected]('/albums')\ndef get_albums():\n return render_template(\"albums.html\",albums=albums)\n\[email protected]('/artists')\ndef get_artists():\n return render_template(\"artists.html\", artists=artists, super=super)\n\n\[email protected]('/artists/<id>')\ndef get_artist_details(id):\n return render_template(\"artist_details.html\", artists=artists)\n\[email protected]('/user/<user>')\ndef user(user):\n age = ages.get(user)\n return render_template('helloflask.html', user=user, age=age)\n\n\nif __name__ == '__main__':\n app.run()\n" } ]
1
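helloflask.py above hangs all of its views off one Flask app object, so the routes can be exercised without a browser through Flask's built-in test client. A rough smoke-test sketch, assuming the module is importable as helloflask and that the templates it renders exist in the usual templates/ folder:

```python
# Smoke-test sketch for the helloflask.py routes above. The import path and
# the presence of the rendered templates are assumptions, not part of the record.
from helloflask import app

client = app.test_client()

# '/' returns a plain string, so no template is involved.
response = client.get('/')
assert response.status_code == 200
assert b'Hello World!' in response.data

# '/user/<user>' looks the name up in the module-level `ages` dict and
# renders helloflask.html with it.
response = client.get('/user/bob')
assert response.status_code == 200
```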
MattMuelot/Trumpus
https://github.com/MattMuelot/Trumpus
bb56bb0fd5ee56b997f35d10ff81a0c382d607cd
e81e55034c70533a68578b0f4030d683c9dd6e6d
7e6cc87377920c259155c287b231870984048606
refs/heads/master
2023-07-08T02:56:33.457815
2021-08-08T20:12:31
2021-08-08T20:12:31
387,551,504
0
0
null
2021-07-19T17:57:28
2021-08-08T04:33:28
2021-08-08T20:12:31
Python
[ { "alpha_fraction": 0.5032479763031006, "alphanum_fraction": 0.5387848615646362, "avg_line_length": 44.91228103637695, "blob_id": "6b55761036b0a50956df56e9089246f71ee6347f", "content_id": "98a97b26b9675e39558fd12e0a76bc37d074fcc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2617, "license_type": "no_license", "max_line_length": 93, "num_lines": 57, "path": "/menu_screens.py", "repo_name": "MattMuelot/Trumpus", "src_encoding": "UTF-8", "text": "import pygame\n\n\nclass Menus:\n def __init__(self, screen, clock):\n self.screen = screen\n self.clock = clock\n self.main_menu_bg = pygame.image.load('Assets/mainmenu.png').convert_alpha()\n self.settings_bg = pygame.image.load('Assets/settings.png').convert_alpha()\n self.click_sound = pygame.mixer.Sound('Assets/click.ogg')\n self.font = pygame.font.SysFont('arial', 50)\n self.start_rect = pygame.Rect(58, 485, 210, 78)\n self.settings_rect = pygame.Rect(523, 485, 210, 78)\n self.menu_rect = pygame.Rect(300, 485, 190, 66)\n self.mouse_rect = pygame.Rect(50, 50, 5, 5)\n self.running = True\n self.FPS = 60\n pygame.mixer.music.load('Assets/starspangled.mp3')\n pygame.mixer.music.play(-1)\n\n def main_menu(self):\n while self.running:\n self.clock.tick(self.FPS)\n self.screen.blit(self.main_menu_bg, (0, 0))\n mouse = pygame.mouse.get_pos()\n self.mouse_rect.x, self.mouse_rect.y = mouse[0], mouse[1]\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 'quit'\n if event.type == pygame.MOUSEBUTTONUP:\n self.click_sound.play()\n if self.mouse_rect.colliderect(self.start_rect):\n print('start click')\n pygame.draw.rect(self.screen, (255, 255, 255), self.start_rect, 2)\n return 'start'\n if self.mouse_rect.colliderect(self.settings_rect):\n pygame.draw.rect(self.screen, (255, 255, 255), self.settings_rect, 2)\n return 'settings'\n # pygame.draw.rect(self.screen, (255, 0, 0), self.mouse_rect)\n pygame.display.update()\n\n def settings(self):\n while self.running:\n self.clock.tick(self.FPS)\n self.screen.blit(self.settings_bg, (0, 0))\n mouse = pygame.mouse.get_pos()\n self.mouse_rect.x, self.mouse_rect.y = mouse[0], mouse[1]\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 'quit'\n if event.type == pygame.MOUSEBUTTONUP:\n self.click_sound.play()\n if self.mouse_rect.colliderect(self.menu_rect):\n pygame.draw.rect(self.screen, (255, 255, 255), self.menu_rect, 2)\n return 'menu'\n pygame.draw.rect(self.screen, (255, 0, 0), self.menu_rect, 2)\n pygame.display.update()\n" }, { "alpha_fraction": 0.7249466776847839, "alphanum_fraction": 0.7377398610115051, "avg_line_length": 23.736841201782227, "blob_id": "3bfd6ca214352d1c1ceaa056c7b6900cdb8cadde", "content_id": "220f5ad541eb951ed20c042b60d329b869f73526", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 469, "license_type": "no_license", "max_line_length": 85, "num_lines": 19, "path": "/README.md", "repo_name": "MattMuelot/Trumpus", "src_encoding": "UTF-8", "text": "<b>WASD to Move, J to Jump, and Space to Shoot!</b>\n\n\n<h1>Make your way through Washington DC to try to reach the White House!</h1>\n\n\n![Alt text](/Assets/Screenshots/level-one-screenshot.jpg)\n\n\n<h1>Battle your way though the White House grounds, fighting off Secret Service!</h1>\n\n\n![Alt text](/Assets/Screenshots/level-two-screenshot.jpg)\n\n\n<h1>Fight inside the White House, try to reclaim your glory!</h1>\n\n\n![Alt 
text](/Assets/Screenshots/level-three-screenshot.jpg)" }, { "alpha_fraction": 0.5990179777145386, "alphanum_fraction": 0.6121112704277039, "avg_line_length": 29.450000762939453, "blob_id": "bd52e71dbb63862296d833cacbc67ea6ad0d1cd5", "content_id": "fff571edd6b4b17fd4a9f36b8750be18f144c421", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 611, "license_type": "no_license", "max_line_length": 113, "num_lines": 20, "path": "/main.py", "repo_name": "MattMuelot/Trumpus", "src_encoding": "UTF-8", "text": "\"\"\"\nThis game is still a work in progress. All code here is subject to change. I welcome any and all advice/feedback.\nPlease fork and do a PR if you have any changes you'd like to implement.\n -VeinyAngus (MattMuelot)\n\"\"\"\nimport pygame\nfrom level import Levels\nfrom menu_screens import Menus\n\n\n# ------------- Initialize Pygame, create screen and clock objects --------------#\n\npygame.init()\nscreen = pygame.display.set_mode((800, 600))\nclock = pygame.time.Clock()\n\n# --------------------- MAIN GAME LOOP --------------------- #\n\nl = Levels(screen, clock, 60)\nl.main_game_loop()\n\n\n" }, { "alpha_fraction": 0.5204238891601562, "alphanum_fraction": 0.5412079691886902, "avg_line_length": 35.13011169433594, "blob_id": "7ae1840490ee1bbfab33db4205a0cf05fd67184f", "content_id": "f1b8d8db4e16bb1068dd283cfd49432240bed96d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9719, "license_type": "no_license", "max_line_length": 114, "num_lines": 269, "path": "/game_classes.py", "repo_name": "MattMuelot/Trumpus", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nimport json\n# -------------------------------------- TRUMP (PLAYER) CLASS ------------------------------------------ #\n\n\nclass Trump:\n \"\"\"Our main player object\"\"\"\n def __init__(self):\n self.x = 250\n self.y = 370\n with open('config_files/level_settings.json', 'r') as f:\n self.data = json.load(f)\n self.x_vel = 5\n self.y_vel = 5\n self.rect = pygame.Rect(self.x, self.y, 50, 100)\n self.lives = 5\n self.money = 5\n self.money_shot = []\n self.img = pygame.image.load('Assets/trump.png').convert_alpha()\n self.jump_vel = 20\n self.jumping = False\n self.wave = 0\n self.agents_left = 25\n\n def update_rect(self):\n \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n self.rect = pygame.Rect(self.x, self.y, 50, 100)\n\n def draw(self, s):\n \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n self.update_rect()\n s.blit(self.img, (self.x, self.y))\n\n def move(self, s):\n \"\"\"Corresponding key presses move player in multiple directions\"\"\"\n keys = pygame.key.get_pressed()\n if keys[pygame.K_d] and self.x + self.x_vel <= 750:\n self.x += self.x_vel\n self.draw(s)\n if keys[pygame.K_a] and self.x - self.x_vel > 0:\n self.x -= self.x_vel\n self.draw(s)\n if self.jumping:\n pass\n else:\n if keys[pygame.K_w] and self.y - self.y_vel > 370:\n self.y -= self.y_vel\n self.draw(s)\n if keys[pygame.K_s] and self.y + self.y_vel < 500:\n self.y += self.y_vel\n self.draw(s)\n\n def jump(self, s):\n \"\"\"During main game loop, if the player jumping attribute is true, it will run\"\"\"\n if self.jumping:\n self.y -= self.jump_vel\n self.jump_vel -= 1 # For realistic jumping, velocity slows down as player reaches apex of jump\n self.draw(s) # Draw character to screen based upon current x and y coordinates\n if self.jump_vel == -21: # If we have reached our 
starting position, reset jump velocity/set to false\n self.jump_vel = 20\n self.jumping = False\n\n def reset(self):\n self.x = 250\n self.y = 370\n self.lives = 5\n self.money = 5\n self.money_shot = []\n self.wave = 0\n self.agents_left = 25\n\n\n# -------------------------------------- DECLARATION CLASS ------------------------------------------- #\n\n\nclass Declaration:\n \"\"\"One of the enemy objects. This object moves in sync with the background to give an illusion\n of being static in relation to the background\"\"\"\n def __init__(self):\n self.x = random.randint(900, 3000)\n self.y = random.randint(400, 550)\n self.rect = pygame.Rect(self.x, self.y, 50, 50)\n self.img = pygame.image.load('Assets/declaration.png').convert_alpha()\n self.burn_img = pygame.image.load('Assets/burnedec.png').convert_alpha()\n self.timer = False\n self.its = 0\n\n def update_rect(self):\n \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n self.rect = pygame.Rect(self.x, self.y, 50, 50)\n\n def draw(self, s):\n \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n if self.timer is False:\n self.update_rect()\n s.blit(self.img, (self.x, self.y))\n return False\n # self.timer attribute will be true if struck by a projectile\n # and will show the burn image for 30 frames\n elif self.timer:\n self.update_rect()\n s.blit(self.burn_img, (self.x, self.y))\n self.its += 1\n if self.its >= 30:\n return True\n\n def move(self, s):\n \"\"\"x moves -2 for each game-loop to maintain pace with game background, so object appears static\"\"\"\n self.x -= 2\n self.update_rect()\n if self.x < -50:\n return True\n\n\n# ------------------------------------------ MONEYBAG CLASS ----------------------------------------------- #\n\n\nclass Moneybag:\n \"\"\"Bag of money object. Player needs to collide with these to collect it\"\"\"\n def __init__(self):\n self.x = random.randint(900, 3000)\n self.y = random.randint(370, 550)\n self.rect = pygame.Rect(self.x, self.y, 50, 50)\n self.img = pygame.image.load('Assets/moneybag.png').convert_alpha()\n\n def update_rect(self):\n \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n self.rect = pygame.Rect(self.x, self.y, 50, 50)\n\n def draw(self, s):\n \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n self.update_rect()\n s.blit(self.img, (self.x, self.y))\n\n def move(self, s):\n \"\"\"x moves -2 for each game-loop to maintain pace with game background, so object appears static\"\"\"\n self.x -= 2\n self.draw(s)\n if self.x < -50:\n return True\n\n\n# -------------------------------------------- BILL CLASS ------------------------------------------------ #\n\n\nclass Bill:\n \"\"\"This is the projectile that is created when the player fires\"\"\"\n\n def __init__(self, t):\n self.x = t.x + 28\n self.y = t.y + 45\n self.vel = 10\n self.rect = pygame.Rect(self.x, self.y, 35, 15)\n self.img = pygame.image.load('Assets/bill-35-15.png')\n\n def update_rect(self):\n \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n self.rect = pygame.Rect(self.x, self.y, 35, 15)\n\n def draw(self, s):\n \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n self.update_rect()\n s.blit(self.img, (self.x, self.y))\n\n def move(self, s):\n \"\"\"Moves the projectile every frame. 
If the projectile's x value is greater than the width of the screen\n        we return True to indicate that the projectile has gone off screen.\"\"\"\n        self.x += self.vel\n        self.draw(s)\n        if self.x > 800:\n            return True\n\n\n# --------------------------------------- BULLET CLASS ----------------------------------------------- #\n\n\nclass Bullet:\n    \"\"\"This is the projectile shot by the SecretService agents\"\"\"\n    def __init__(self, a):\n        self.x = a.x - 6\n        self.y = a.y + 33\n        self.vel = 6\n        self.rect = pygame.Rect(self.x, self.y, 10, 5)\n        self.img = pygame.image.load('Assets/bullet.png').convert_alpha()\n\n    def update_rect(self):\n        \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n        self.rect = pygame.Rect(self.x, self.y, 10, 5)\n\n    def draw(self, s):\n        \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n        self.update_rect()\n        s.blit(self.img, (self.x, self.y))\n\n    def move(self, s):\n        \"\"\"Moves the projectile every frame. If the projectile's x value is less than zero, the bullet\n        has left the screen on the left, so we return True to indicate it has gone off screen.\"\"\"\n        self.x -= self.vel\n        self.draw(s)\n        if self.x < 0:\n            return True\n\n\n# --------------------------------------- SECRET SERVICE CLASS -------------------------------------------- #\n\n\nclass SecretService:\n    \"\"\"One of the main enemy objects. These objects shoot Bullets() at set intervals\"\"\"\n    def __init__(self):\n        self.x = random.randint(900, 3000)\n        self.y = random.randint(400, 500)\n        # self.x = 300\n        # self.y = 300\n        self.rect = pygame.Rect(self.x, self.y, 50, 100)\n        self.img = pygame.image.load('Assets/agent.png').convert_alpha()\n        self.shoot_hold = True\n        self.shots = []\n        self.counter = 4000\n\n    def update_rect(self):\n        \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n        self.rect = pygame.Rect(self.x, self.y, 50, 100)\n\n    def draw(self, s):\n        \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n        self.update_rect()\n        s.blit(self.img, (self.x, self.y))\n\n    def shoot(self, b):\n        \"\"\"If the self.shoot_hold attribute is not True (the attribute is set to True in the main game loop\n        when a bullet is fired, until the self.counter attribute reaches 0) then a Bullet() object is created\n        and appended to the list (argument b)\"\"\"\n        if not self.shoot_hold:\n            b.append(Bullet(self))\n\n    def move(self, s):\n        \"\"\"x moves -2 for each game-loop to maintain pace with game background, so object appears static\"\"\"\n        self.x -= 2\n        self.draw(s)\n        if self.x < -50:\n            return True\n\n# ------------------------------------------ HEART CLASS ------------------------------------------- #\n\n\nclass Heart:\n    \"\"\"This appears on screen and if a player collides with it, they are granted an additional life\"\"\"\n    def __init__(self):\n        self.x = random.randint(900, 8000)\n        self.y = random.randint(400, 550)\n        self.rect = pygame.Rect(self.x, self.y, 25, 25)\n        self.img = pygame.image.load('Assets/heart.png').convert_alpha()\n\n    def update_rect(self):\n        \"\"\"Updates the objects Rect using the current x and y values\"\"\"\n        self.rect = pygame.Rect(self.x, self.y, 25, 25)\n\n    def draw(self, s):\n        \"\"\"Draws the img attribute to screen on the current x and y values\"\"\"\n        self.update_rect()\n        s.blit(self.img, (self.x, self.y))\n\n    def move(self, s):\n        \"\"\"x moves -2 for each game-loop to maintain pace with game background, so object appears static\"\"\"\n        self.x -= 2\n        self.draw(s)\n        if self.x < -50:\n            return True\n" }, { "alpha_fraction": 
0.44504672288894653, "alphanum_fraction": 0.4643045961856842, "avg_line_length": 44.47494888305664, "blob_id": "347eee5e44be26d03dd85669b7afd750669e2dce", "content_id": "919e27fe7a2c23986ed0521fc98c1cc6b948bf9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22692, "license_type": "no_license", "max_line_length": 113, "num_lines": 499, "path": "/level.py", "repo_name": "MattMuelot/Trumpus", "src_encoding": "UTF-8", "text": "from game_classes import Trump, Declaration, Moneybag, Bill, SecretService, Heart\nimport pygame\nimport json\nfrom menu_screens import Menus\n\n\nclass Levels:\n \"\"\"Game level class. Pass as an argument the level number and the level will play out according to\n the level specified. \"\"\"\n def __init__(self, screen, clock, fps):\n with open('config_files/level_settings.json', 'r') as f:\n self.data = json.load(f)\n self.screen = screen\n self.clock = clock\n self.menu = Menus(self.screen, self.clock)\n self.FPS = fps\n self.trump = Trump()\n self.decs = []\n self.bags = []\n self.hearts = []\n self.agents = []\n self.bullets = []\n\n # ------------------ LOAD IN GLOBAL AUDIO FILES ---------------- #\n\n pygame.mixer.set_num_channels(32)\n self.moneycollect = pygame.mixer.Sound('Assets/moneycollect.ogg')\n self.burn = pygame.mixer.Sound('Assets/burn.ogg')\n self.wrong = pygame.mixer.Sound('Assets/wrong.ogg')\n self.throw = pygame.mixer.Sound('Assets/throw.ogg')\n self.shot = pygame.mixer.Sound('Assets/gunshot.ogg')\n self.okay = pygame.mixer.Sound('Assets/okay.ogg')\n self.scream = pygame.mixer.Sound('Assets/scream.ogg')\n self.jump = pygame.mixer.Sound('Assets/jump.ogg')\n self.collect_heart = pygame.mixer.Sound('Assets/collect_heart.ogg')\n\n # ------------------ LOAD IN GLOBAL IMAGES ----------------- #\n\n self.heart = pygame.image.load('Assets/heart.png').convert_alpha()\n self.bill = pygame.image.load('Assets/dollarbill-75-35.png').convert_alpha()\n self.bg1 = pygame.image.load('Assets/level1bg.png').convert_alpha()\n self.bg2 = pygame.image.load('Assets/level2bg.png').convert_alpha()\n self.bg3a = pygame.image.load('Assets/level3bg.png').convert_alpha()\n self.bg3b = pygame.image.load('Assets/level3bg2.png').convert_alpha()\n\n # ------------------ LOAD IN GLOBAL FONTS ----------------- #\n\n self.main_font = pygame.font.SysFont('comicsans', 50)\n\n # ------------------- RENAME ------------------- #\n\n self.obj_timer = 0\n self.i = 0\n self.running = True\n\n def level_one(self):\n \"\"\"Level One method\"\"\"\n self.decs = [Declaration() for _ in range(self.data['level_one']['declarations_per_wave'])]\n self.bags = [Moneybag() for _ in range(self.data['level_one']['money_bags_per_wave'])]\n pygame.mixer.music.load('Assets/level1.mp3')\n pygame.mixer.music.play(-1)\n while self.running:\n self.obj_timer += 1\n self.clock.tick(60) # Set FPS To 60\n self.screen.blit(self.bg1, (self.i, 0))\n self.screen.blit(self.bg1, (3200 + self.i, 0))\n self.screen.blit(self.bill, (10, 10))\n self.screen.blit(self.heart, (750, 10))\n money_label = self.main_font.render(f'${self.trump.money}', True, (0, 255, 0))\n heart_label = self.main_font.render(f'{self.trump.lives}', True, (255, 255, 255))\n self.screen.blit(money_label, (95, 12))\n self.screen.blit(heart_label, (715, 10))\n if self.obj_timer < 400:\n obj_label = self.main_font.render(f'Collect $30 To Advance', True, (0, 255, 0))\n self.screen.blit(obj_label, (225, 12))\n self.i -= 2\n if self.i == -3200:\n self.screen.blit(self.bg1, (3200 + self.i, 0))\n 
self.i = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.mixer.music.stop()\n return 'quit'\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_j:\n if self.trump.jumping:\n pass\n else:\n self.trump.jumping = True\n self.jump.play()\n if event.key == pygame.K_SPACE:\n if self.trump.money > 0:\n self.trump.money_shot.append(Bill(self.trump))\n self.throw.play()\n self.trump.money -= 1\n for d in self.decs[:]:\n off_screen = d.move(self.screen)\n burn_result = d.draw(self.screen)\n if burn_result:\n self.decs.remove(d)\n if off_screen:\n self.decs.remove(d)\n for m in self.trump.money_shot[:]:\n if m.rect.colliderect(d.rect):\n if not d.timer:\n d.timer = True\n self.burn.play()\n self.trump.money_shot.remove(m)\n else:\n pass\n if d.rect.colliderect(self.trump.rect):\n if d.timer:\n pass\n else:\n self.decs.remove(d)\n self.wrong.play()\n self.trump.lives -= 1\n for b in self.bags[:]:\n off_screen = b.move(self.screen)\n if b.rect.colliderect(self.trump.rect):\n self.bags.remove(b)\n self.moneycollect.play()\n self.trump.money += 3\n if self.trump.money >= 30:\n pygame.mixer.music.stop()\n return 'win'\n if off_screen:\n self.bags.remove(b)\n for m in self.trump.money_shot[:]:\n off_screen = m.move(self.screen)\n if off_screen:\n self.trump.money_shot.remove(m)\n if len(self.decs) <= 0:\n self.trump.wave += 2\n self.decs = [Declaration() for _ in range(self.data['level_one']['declarations_per_wave'])]\n if len(self.bags) <= 0:\n self.bags = [Moneybag() for _ in range(self.data['level_one']['money_bags_per_wave'])]\n if self.trump.jumping:\n self.trump.jump(self.screen)\n if self.trump.lives <= 0:\n pygame.mixer.music.stop()\n return 'lost'\n self.trump.draw(self.screen)\n self.trump.move(self.screen)\n pygame.display.update()\n\n def level_two(self):\n self.trump.reset()\n self.obj_timer = 0\n self.decs = [Declaration() for _ in range(self.data['level_two']['declarations_per_wave'])]\n self.bags = [Moneybag() for _ in range(self.data['level_two']['money_bags_per_wave'])]\n self.agents = [SecretService() for _ in range(self.data['level_two']['secret_service_per_wave'])]\n pygame.mixer.music.load('Assets/level2.mp3')\n pygame.mixer.music.play(-1)\n while self.running:\n self.obj_timer += 1\n eta = self.clock.tick(60) # Set FPS To 60\n self.screen.blit(self.bg2, (self.i, 0))\n self.screen.blit(self.bg2, (3200 + self.i, 0))\n self.screen.blit(self.bill, (10, 10))\n self.screen.blit(self.heart, (750, 10))\n money_label = self.main_font.render(f'${self.trump.money}', True, (0, 255, 0))\n heart_label = self.main_font.render(f'{self.trump.lives}', True, (255, 255, 255))\n self.screen.blit(money_label, (95, 12))\n self.screen.blit(heart_label, (715, 10))\n if self.obj_timer < 400:\n obj_label = self.main_font.render(f'Collect $60 To Advance', True, (0, 255, 0))\n self.screen.blit(obj_label, (225, 12))\n self.i -= 2\n if self.i == -3200:\n self.screen.blit(self.bg1, (3200 + self.i, 0))\n self.i = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.mixer.music.stop()\n return 'quit'\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_j:\n if self.trump.jumping:\n pass\n else:\n self.trump.jumping = True\n self.jump.play()\n if event.key == pygame.K_SPACE:\n if self.trump.money > 0:\n self.trump.money_shot.append(Bill(self.trump))\n self.throw.play()\n self.trump.money -= 1\n for d in self.decs[:]:\n off_screen = d.move(self.screen)\n burn_result = d.draw(self.screen)\n if burn_result:\n self.decs.remove(d)\n if off_screen:\n 
self.decs.remove(d)\n for m in self.trump.money_shot[:]:\n if m.rect.colliderect(d.rect):\n if not d.timer:\n d.timer = True\n self.burn.play()\n self.trump.money_shot.remove(m)\n else:\n pass\n if d.rect.colliderect(self.trump.rect):\n if d.timer:\n pass\n else:\n self.decs.remove(d)\n self.wrong.play()\n self.trump.lives -= 1\n for b in self.bags[:]:\n off_screen = b.move(self.screen)\n if b.rect.colliderect(self.trump.rect):\n self.bags.remove(b)\n self.moneycollect.play()\n self.trump.money += 3\n if self.trump.money >= 60:\n pygame.mixer.music.stop()\n return 'win'\n if off_screen:\n self.bags.remove(b)\n for a in self.agents[:]:\n off_screen = a.move(self.screen)\n a.counter -= eta\n if a.counter <= 0:\n if a.x <= 1000:\n a.shoot_hold = False\n a.shoot(self.bullets)\n a.counter += 4000\n a.shoot_hold = True\n self.shot.play()\n if off_screen:\n self.agents.remove(a)\n for m in self.trump.money_shot[:]:\n if m.rect.colliderect(a.rect):\n self.agents.remove(a)\n self.scream.play()\n self.trump.money_shot.remove(m)\n else:\n pass\n for b in self.bullets[:]:\n b.move(self.screen)\n if b.rect.colliderect(self.trump.rect):\n self.bullets.remove(b)\n self.okay.play()\n self.trump.lives -= 1\n for m in self.trump.money_shot[:]:\n off_screen = m.move(self.screen)\n if off_screen:\n self.trump.money_shot.remove(m)\n if len(self.decs) <= 0:\n self.trump.wave += 2\n self.decs = [Declaration() for _ in range(self.data['level_two']['declarations_per_wave'])]\n if len(self.bags) <= 0:\n self.bags = [Moneybag() for _ in range(self.data['level_two']['money_bags_per_wave'])]\n if len(self.agents) <= 0:\n self.agents = [SecretService() for _ in range(self.data['level_two']['secret_service_per_wave'])]\n if self.trump.jumping:\n self.trump.jump(self.screen)\n if self.trump.lives <= 0:\n pygame.mixer.music.stop()\n return 'lost'\n self.trump.draw(self.screen)\n self.trump.move(self.screen)\n pygame.display.update()\n\n def level_three(self):\n self.obj_timer = 0\n self.trump.reset()\n self.bullets = []\n self.decs = [Declaration() for _ in range(self.data['level_two']['declarations_per_wave'])]\n self.bags = [Moneybag() for _ in range(self.data['level_two']['money_bags_per_wave'])]\n self.agents = [SecretService() for _ in range(self.data['level_two']['secret_service_per_wave'])]\n pygame.mixer.music.load('Assets/level3.mp3')\n pygame.mixer.music.play(-1)\n while self.running:\n self.obj_timer += 1\n eta = self.clock.tick(60) # Set FPS To 60\n self.screen.blit(self.bg3a, (self.i, 0))\n self.screen.blit(self.bg3a, (3200 + self.i, 0))\n self.screen.blit(self.bill, (10, 10))\n self.screen.blit(self.heart, (750, 10))\n money_label = self.main_font.render(f'${self.trump.money}', True, (0, 255, 0))\n heart_label = self.main_font.render(f'{self.trump.lives}', True, (255, 255, 255))\n self.screen.blit(money_label, (95, 12))\n self.screen.blit(heart_label, (715, 10))\n if self.obj_timer < 400:\n obj_label = self.main_font.render(f'Kill 25 Agents To Advance', True, (0, 255, 0))\n self.screen.blit(obj_label, (225, 12))\n self.i -= 2\n if self.i == -3200:\n self.screen.blit(self.bg1, (3200 + self.i, 0))\n self.i = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.mixer.music.stop()\n return 'quit'\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_j:\n if self.trump.jumping:\n pass\n else:\n self.trump.jumping = True\n self.jump.play()\n if event.key == pygame.K_SPACE:\n if self.trump.money > 0:\n self.trump.money_shot.append(Bill(self.trump))\n self.throw.play()\n 
self.trump.money -= 1\n for d in self.decs[:]:\n off_screen = d.move(self.screen)\n burn_result = d.draw(self.screen)\n if burn_result:\n self.decs.remove(d)\n if off_screen:\n self.decs.remove(d)\n for m in self.trump.money_shot[:]:\n if m.rect.colliderect(d.rect):\n if not d.timer:\n d.timer = True\n self.burn.play()\n self.trump.money_shot.remove(m)\n else:\n pass\n if d.rect.colliderect(self.trump.rect):\n if d.timer:\n pass\n else:\n self.decs.remove(d)\n self.wrong.play()\n self.trump.lives -= 1\n for b in self.bags[:]:\n off_screen = b.move(self.screen)\n if b.rect.colliderect(self.trump.rect):\n self.bags.remove(b)\n self.moneycollect.play()\n self.trump.money += 3\n if off_screen:\n self.bags.remove(b)\n for a in self.agents[:]:\n off_screen = a.move(self.screen)\n a.counter -= eta\n if a.counter <= 0:\n a.shoot_hold = False\n a.shoot(self.bullets)\n a.counter += 4000\n a.shoot_hold = True\n self.shot.play()\n if off_screen:\n self.agents.remove(a)\n for m in self.trump.money_shot[:]:\n if m.rect.colliderect(a.rect):\n self.agents.remove(a)\n self.scream.play()\n self.trump.money_shot.remove(m)\n self.trump.agents_left -= 1\n if self.trump.agents_left <= 0:\n return 'win'\n else:\n pass\n for b in self.bullets[:]:\n b.move(self.screen)\n if b.rect.colliderect(self.trump.rect):\n self.bullets.remove(b)\n self.okay.play()\n self.trump.lives -= 1\n for m in self.trump.money_shot[:]:\n off_screen = m.move(self.screen)\n if off_screen:\n self.trump.money_shot.remove(m)\n if len(self.decs) <= 0:\n self.trump.wave += 2\n self.decs = [Declaration() for _ in range(self.data['level_two']['declarations_per_wave'])]\n if len(self.bags) <= 0:\n self.bags = [Moneybag() for _ in range(self.data['level_two']['money_bags_per_wave'])]\n if len(self.agents) <= 0:\n self.agents = [SecretService() for _ in range(self.data['level_two']['secret_service_per_wave'])]\n if self.trump.jumping:\n self.trump.jump(self.screen)\n if self.trump.lives <= 0:\n pygame.mixer.music.stop()\n return 'lost'\n agent_label = self.main_font.render(f'Agents Left: {self.trump.agents_left}', True, (255, 255, 255))\n self.screen.blit(agent_label, (35, 550))\n self.trump.draw(self.screen)\n self.trump.move(self.screen)\n pygame.display.update()\n\n def level_one_victory(self):\n pygame.mixer.music.load('Assets/yankeedoodle.mp3')\n pygame.mixer.music.play(-1)\n win_label = self.main_font.render('LEVEL ONE COMPLETE', True, (0, 0, 0))\n win_label2 = self.main_font.render('PRESS SPACE TO CONTINUE', True, (0, 0, 0))\n self.i = 0\n self.running = True\n while self.running:\n self.clock.tick(self.FPS)\n self.screen.blit(self.bg1, (self.i, 0))\n self.screen.blit(self.bg1, (3200 + self.i, 0))\n self.screen.blit(win_label, (200, 240))\n self.screen.blit(win_label2, (150, 300))\n self.i -= 2\n if self.i == -3200:\n self.screen.blit(self.bg1, (3200 + self.i, 0))\n self.i = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 'quit'\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n return 'next'\n pygame.display.update()\n\n def level_two_victory(self):\n pygame.mixer.music.load('Assets/yankeedoodle.mp3')\n pygame.mixer.music.play(-1)\n win_label = self.main_font.render('LEVEL TWO COMPLETE', True, (0, 0, 0))\n win_label2 = self.main_font.render('PRESS SPACE TO CONTINUE', True, (0, 0, 0))\n self.i = 0\n self.running = True\n while self.running:\n self.clock.tick(self.FPS)\n self.screen.blit(self.bg2, (self.i, 0))\n self.screen.blit(self.bg2, (3200 + self.i, 0))\n self.screen.blit(win_label, (200, 
240))\n self.screen.blit(win_label2, (150, 300))\n self.i -= 2\n if self.i == -3200:\n self.screen.blit(self.bg2, (3200 + self.i, 0))\n self.i = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 'quit'\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n return 'next'\n pygame.display.update()\n\n def level_three_victory(self):\n pygame.mixer.music.load('Assets/yankeedoodle.mp3')\n pygame.mixer.music.play(-1)\n win_label = self.main_font.render('LEVEL THREE COMPLETE', True, (0, 0, 0))\n win_label2 = self.main_font.render('PRESS SPACE TO CONTINUE', True, (0, 0, 0))\n self.i = 0\n self.running = True\n while self.running:\n self.clock.tick(self.FPS)\n self.screen.blit(self.bg3a, (self.i, 0))\n self.screen.blit(self.bg3a, (3200 + self.i, 0))\n self.screen.blit(win_label, (200, 240))\n self.screen.blit(win_label2, (150, 300))\n self.i -= 2\n if self.i == -3200:\n self.screen.blit(self.bg3a, (3200 + self.i, 0))\n self.i = 0\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 'quit'\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n return 'next'\n pygame.display.update()\n\n def main_game_loop(self):\n self.running = True\n while self.running:\n menu_result = self.menu.main_menu()\n if menu_result == 'settings':\n settings_result = self.menu.settings()\n if settings_result == 'menu':\n continue\n if settings_result == 'quit':\n self.running = False\n if menu_result == 'start':\n level_one_result = self.level_one()\n if level_one_result == 'win':\n postgame_one = self.level_one_victory()\n if postgame_one == 'next':\n level_two_result = self.level_two()\n if level_two_result == 'win':\n postgame_two = self.level_two_victory()\n if postgame_two == 'next':\n level_three_result = self.level_three()\n if level_three_result == 'win':\n continue # TODO you win screen for now, final boss in future\n if level_three_result == 'lost':\n pass # TODO lose screen\n if postgame_two == 'quit':\n self.running = False\n if level_two_result == 'lost':\n pass # TODO lose screen\n if postgame_one == 'quit':\n self.running = False\n if level_one_result == 'lost':\n pass # TODO lose screen\n if level_one_result == 'quit':\n self.running = False\n if menu_result == 'quit':\n self.running = False\n" } ]
5
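In game_classes.py above, `Trump.jump` subtracts `jump_vel` from `y` every frame while `jump_vel` counts down from +20, resetting once it reaches -21. The applied velocities 20, 19, ..., 0, ..., -20 sum to zero, so the sprite decelerates to the apex and falls back to its starting height. A standalone sketch of that arithmetic (no pygame required; the start height and velocity are parameters):

```python
def simulate_jump(start_y=370, initial_vel=20):
    """Replay the update rule from Trump.jump: y -= vel, then vel -= 1,
    stopping once vel reaches -(initial_vel + 1), i.e. the -21 check above."""
    y, vel = start_y, initial_vel
    while True:
        y -= vel        # same as self.y -= self.jump_vel
        vel -= 1        # same as self.jump_vel -= 1
        if vel == -(initial_vel + 1):
            break
    return y

# The applied velocities 20..-20 cancel out, so the jump is symmetric.
assert simulate_jump() == 370
print("sprite returns to starting y:", simulate_jump())
```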
MarekPas/Steemnova_stats
https://github.com/MarekPas/Steemnova_stats
4250fd6e29d98817635816048845be79e15ef0da
1b687071357c2eaa632c8ea82582f6bc9ba413c5
da1abbb527ba99c283cc1bd5b54199498da326f0
refs/heads/master
2021-01-03T20:58:07.172929
2020-09-28T13:32:25
2020-09-28T13:32:25
240,233,781
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5990637540817261, "alphanum_fraction": 0.6299341917037964, "avg_line_length": 34.60360336303711, "blob_id": "65b4e64b9d6f0bc978ea28a58d011fe5d0aeef2c", "content_id": "0f84b2cbc419c69d7d7cd90a1b4c7cf2747471cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7904, "license_type": "no_license", "max_line_length": 220, "num_lines": 222, "path": "/generator.py", "repo_name": "MarekPas/Steemnova_stats", "src_encoding": "UTF-8", "text": "import mysql.connector\nimport time\nfrom databases import config, local_config\nfrom datetime import date, timedelta\n\ntable = \"sn1_users\"\n\ndef importer(table, indexer=\"max\"):\n mycursor = sql.cursor(buffered=True)\n mycursor.execute(f\"DELETE FROM {table} WHERE `{today}` is null\")\n sql.commit()\n if indexer == \"min\":\n mycursor.execute(f\"SELECT sn1_users.name, {table}.`{today}` - {table}.`{yesterday}` AS result FROM sn1_users JOIN {table} ON sn1_users.id = {table}.id ORDER BY COALESCE(result, 0) ASC LIMIT 1\")\n result = mycursor.fetchone()\n elif indexer == \"max\":\n mycursor.execute(f\"SELECT sn1_users.name, {table}.`{today}` - {table}.`{yesterday}` AS result FROM sn1_users JOIN {table} ON sn1_users.id = {table}.id ORDER BY COALESCE(result, 0) DESC LIMIT 1\")\n result = mycursor.fetchone()\n return result[0].strip(), result[1]\n\ndef string_with_at(ls, newline=False): #adding \"@\" to players who has no \" \" in nickname\n result = \"\"\n if newline:\n for name in ls:\n if \" \" in name:\n result += \" \" + name[0].strip() + \"\\n\"\n else:\n result += \" @\" + name[0].strip() + \"\\n\"\n else:\n for name in ls:\n if \" \" in name:\n result += \" \" + name[0].strip() + \",\"\n else:\n result += \" @\" + name[0].strip() + \",\"\n return result\n\ndef new_users():\n mycursor.execute(f\"SELECT `name` FROM sn1_users WHERE `{yesterday}` is NULL\")\n new_ = ()\n for i in mycursor.fetchall():\n new_ += i\n new_players = string_with_at(new_, newline=True)\n if new_players == \"\":\n new_players = \"No new players today\"\n print(\"No new players today\\n\")\n else:\n print(\"New players:\\n\", new_players)\n return new_players\n\ndef deleted_players():\n delcursor = sql.cursor()\n delcursor.execute(f\"SELECT name FROM {table} WHERE `{today}` IS Null\")\n d = delcursor.fetchall()\n deleted = string_with_at(d, newline=True)\n if deleted == \"\":\n deleted = \"Noone left us today\"\n print(\"Noone left us today\\n\")\n else:\n print(\"Deleted players:\\n\", deleted)\n with open(\"E:/steemnova/deleted.txt\", \"a\") as fd:\n fd.write(f\"{today}\\n{deleted}\")\n delcursor.execute(f\"DELETE FROM {table} WHERE `{today}` IS Null\")\n sql.commit()\n return deleted\n\ndef check_last_previous_day():\n succes = False\n day = 1\n while succes == False:\n try:\n yesterday = str(date.today() - timedelta(days=day))\n cursor = sql.cursor(buffered=True)\n cursor.execute(f\"SELECT `{yesterday}` FROM sn1_users\")\n succes = True\n except:\n day += 1\n continue\n print(f\"Last update was {day} day(s) ago.\")\n return yesterday\n\ndef warning_players():\n time_ms = int(time.time())\n days_ms = time_ms - 7689600 # 89 days\n sql = mysql.connector.connect(**config)\n mycursor = sql.cursor(buffered=True)\n mycursor.execute(f\"select username from uni1_users where onlinetime <={days_ms}\")\n result = mycursor.fetchall()\n sql.close()\n warned = \"\"\n if len(result) > 0:\n warned = string_with_at(result)\n warned = warned.strip(\",\")\n warned = warned.lstrip(\" \")\n warned += \" you may be deleted 
soon due to inactivity. Maybe it's time to come back to the game? :)\"\n print(\"Warning: \", warned)\n return warned\n\ndef vacations():\n sql = mysql.connector.connect(**config)\n vac_cursor = sql.cursor()\n vac_cursor.execute(\"select count(*) from uni1_users where urlaubs_modus=1\")\n holidays = vac_cursor.fetchone()[0]\n vac_cursor.execute(\"select count(*) from uni1_users\")\n users = vac_cursor.fetchone()[0]\n print(\"Players:\", users, \"On vacation:\", holidays)\n sql.close()\n return users, holidays\n\nsql = mysql.connector.connect(**local_config)\nmycursor = sql.cursor(buffered=True)\n\ntoday = str(date.today())\nyesterday = check_last_previous_day()\n\n# Top 3 earners\nmycursor.execute(f\"SELECT name, `{today}` - `{yesterday}` AS result FROM {table} order by result DESC limit 3\")\ntop = mycursor.fetchall()\nprint(top[0][0], top[1][0], top[2][0])\n\n# Top 3 losers\nmycursor.execute(f\"SELECT name, `{today}` - `{yesterday}` AS result FROM {table} WHERE `{yesterday}` IS NOT NULL order by COALESCE(result, 0) ASC LIMIT 3\")\nbottom = mycursor.fetchall()\nprint(bottom[0][0], bottom[1][0], bottom[2][0])\n\nmycursor.execute(f\"SELECT AVG(`{today}`) FROM sn1_users \")\naverage = int(mycursor.fetchone()[0])\nprint(\"Average:\", average)\n\nnew_players = new_users()\ndeleted_players = deleted_players()\n\n# DESTROYER\ndestroyer_name, destroyer_score = importer(\"sn1_destroyer\", \"max\")\ndestroyer_score = int(destroyer_score / 1000)\nprint(\"Destroyer:\", destroyer_name, destroyer_score)\n\n# FLEET BUILDER\nbuilder_name, builder_score = importer(\"sn1_fail\", \"max\")\nprint(\"Fleet Builder:\", builder_name, builder_score)\n\n# BUNKER\nbunker_name, bunker_score = importer(\"sn1_bunker\", \"max\")\nprint(\"Bunkerman:\", bunker_name, bunker_score)\n\n# AGRESOR\nagresor_name, agresor_score = importer(\"sn1_agresor\", \"max\")\nprint(\"Agresor:\", agresor_name, agresor_score)\n\n# FARM\nfarma_name, farma_score = importer(\"sn1_farm\", \"max\")\nprint(\"Farma:\", farma_name, farma_score)\n\n# FAIL\nfail_name, fail_score = importer(\"sn1_fail\", \"min\")\nprint(\"Fail of the day:\", fail_name, fail_score)\n\nsql.close()\n\nusers, holidays = vacations()\nwarned = warning_players()\n\nday = time.strftime('%d.%m.%Y')\n\nwith open(\"E:/steemnova/result.txt\", \"w\") as plik:\n plik.write(f\"\"\"\n<center><p>SteemNova - Daily statistics and achievements {day}</p>\nhttps://static.xx.fbcdn.net/images/emoji.php/v9/tbd/1/28/1f4f6.png Daily statistics for <a href=\"https://steemnova.intinte.org/\"><b>SteemNova</b></a> https://static.xx.fbcdn.net/images/emoji.php/v9/tbd/1/28/1f4f6.png</b>\n \n</br>\n<b>Players:</b> {users}\nOn vacation: {holidays}\nAverage points: {average}</br>\n<b>New players:</b>\n{new_players}\n<b>Deleted players:</b>\n{deleted_players}</br>\n\n<b>Top earners of the day:</b>\nPosition | Player | Points\n- | ------------ | -------------\n1.|@{top[0][0]}|+{top[0][1]}\n2.|@{top[1][0]}|+{top[1][1]}\n3.|@{top[2][0]}|+{top[2][1]}\n\n<b>Top losers of the day:</b>\nPosition | Player | Points\n- | ------------ | -------------\n1.|@{bottom[0][0]}|{bottom[0][1]}\n2.|@{bottom[1][0]}|{bottom[1][1]}\n3.|@{bottom[2][0]}|{bottom[2][1]}\n</br>\n<center><h2>https://static.xx.fbcdn.net/images/emoji.php/v9/t9f/1/28/1f3c6.png Achievements https://static.xx.fbcdn.net/images/emoji.php/v9/t9f/1/28/1f3c6.png </h2></center>\n\n\nDestroyer of the day | Player | Destroyed Fleet Points\n-- | ------------ | ------------ \nhttps://media.tenor.co/images/89ba44847971c53223704fe9323caacb/tenor.gif 
|@{destroyer_name}| <center>{destroyer_score}</center>\n\nFleet Builder of the day | Player | Fleet points\n-- | ------------ | ------------ \nhttps://media.tenor.co/images/f4fe55de834960c603f09e1fea6a156d/tenor.gif |@{builder_name}|<center>+{builder_score}</center>\n\nBunkerman of the day | Player | Defense Points\n-- | ------------ | ------------ \nhttps://media.tenor.co/images/940e525e4033fedc6910515f386d3902/tenor.gif |@{bunker_name}| <center>+{bunker_score}</center>\n\nAgressor of the day | Player | Battles won\n-- | ------------ | ------------ \nhttps://media.tenor.co/images/f7b498a905f3e8c964ad5d97bf176e1f/tenor.gif |@{agresor_name}|<center>{agresor_score}</center>\n\nFarm of the day | Player | Battles lost\n-- | ------------ | ------------ \nhttps://media.tenor.co/images/6a28bad348a6d006ddfb25aea8c166da/tenor.gif |@{farma_name}| <center>{farma_score}</center>\n\nEpic fail of the day | Player | Fleet Points\n-- | ------------ | ------------ \nhttps://media.tenor.co/images/0cc3ca22b2720ecc97d6f9ce6fd357bc/tenor.gif |@{fail_name}| <center>{fail_score}</center>\n\n{warned}\n</center>\n<center>https://steemnova.intinte.org/</center>\nhttps://steemnova.intinte.org/styles/resource/images/meta.png\n\"\"\")\n" }, { "alpha_fraction": 0.6000528931617737, "alphanum_fraction": 0.6063970327377319, "avg_line_length": 37.591835021972656, "blob_id": "126c43ddbfeacc7f05f8ac4399f5f1b6e623f2b3", "content_id": "e6d67de3287af19fc24fbf249abc3cbe62ac72be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3783, "license_type": "no_license", "max_line_length": 111, "num_lines": 98, "path": "/import.py", "repo_name": "MarekPas/Steemnova_stats", "src_encoding": "UTF-8", "text": "import mysql.connector\nfrom mysql.connector import errorcode\nfrom databases import config, local_config\nimport datetime\n\n\ndef updater(loc_table_name, table_name, id_, select, stat_type=None):\n if stat_type == 1:\n mycursor.execute(f\"SELECT {id_},{select} FROM {table_name} WHERE stat_type={stat_type} ORDER BY {id_}\")\n else:\n mycursor.execute(f\"SELECT {id_},{select} FROM {table_name} ORDER BY {id_}\")\n points = mycursor.fetchall()\n try:\n mycursor_loc.execute(f\"ALTER TABLE `{loc_table_name}` ADD COLUMN `{today}` BIGINT NULL AFTER `id`;\")\n print(\"Import\", loc_table_name, \"started\")\n except:\n print(\"Update\", loc_table_name, \"started\")\n finally:\n for idek, point in points:\n if idek <= maxnewid:\n## Uncomment two lines below and comment UPDATE line only for first insert\n## mycursor_loc.execute(f\"INSERT INTO {loc_table_name} VALUES({idek}, {point})\")\n## print(f\"{idek} {point}\")\n mycursor_loc.execute(f\"UPDATE {loc_table_name} SET `{today}` = {point} WHERE id={idek}\")\n sql_loc.commit()\n else:\n mycursor_loc.execute(f\"INSERT INTO {loc_table_name} (id, `{today}`) VALUES({idek},{point})\")\n sql_loc.commit()\n print(\"Import/update\", loc_table_name, \"finished!\")\n\n\ndate = datetime.datetime.now()\ntoday = (date.strftime(\"%Y-%m-%d\"))\nprint(today)\nusers_table = \"sn1_users\"\n\ntry:\n sql_loc = mysql.connector.connect(**local_config)\n mycursor_loc = sql_loc.cursor(buffered=True)\n print(\"Connection local database succesful!\")\n mycursor_loc.execute(f\"SELECT id FROM {users_table} ORDER BY id\")\n ids = mycursor_loc.fetchall()\n maxnewid = ids[len(ids) - 1][0]\n\n sql = mysql.connector.connect(**config)\n mycursor = mycursor2 = sql.cursor(buffered=True)\n print(\"Connection steemnova database succesful!\")\n mycursor.execute(\"SELECT 
id_owner,total_points FROM uni1_statpoints WHERE stat_type=1 ORDER BY id_owner\")\n points = mycursor.fetchall()\n\n # POINTS\n try:\n mycursor_loc.execute(f\"ALTER TABLE `{users_table}` ADD COLUMN `{today}` INT NULL AFTER `name`;\")\n print(f\"Import {users_table} started\")\n except:\n print(f\"UPDATE {users_table} started\")\n finally:\n for idek, point in points:\n if idek <= maxnewid:\n mycursor_loc.execute(f\"UPDATE {users_table} SET `{today}` = {point} WHERE id={idek}\")\n sql_loc.commit()\n else:\n # adding new players to the table\n mycursor2.execute(f\"SELECT username FROM uni1_users WHERE id={idek}\")\n name = mycursor2.fetchall()[0][0]\n mycursor_loc.execute(\n f\"INSERT INTO {users_table} (id, name, `{today}`) VALUES({idek},\\\"{name}\\\",{point})\")\n print(f\"Added to database: {idek} {name} {point}\")\n sql_loc.commit()\n print(f\"Import {users_table} finished!\")\n\n # AGRESOR\n updater(\"sn1_agresor\", \"uni1_users\", \"id\", \"wons\")\n\n # BUNKER\n updater(\"sn1_bunker\", \"uni1_statpoints\", \"id_owner\", \"defs_points\", stat_type = 1)\n\n # DESTROYER\n updater(\"sn1_destroyer\", \"uni1_users\", \"id\", \"desunits\")\n\n # FAIL and BUILDER\n updater(\"sn1_fail\", \"uni1_statpoints\", \"id_owner\", \"fleet_points\", stat_type = 1)\n\n # FARM\n updater(\"sn1_farm\", \"uni1_users\", \"id\", \"loos\")\n\n print(\"Import completed\")\n \nexcept mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\nelse:\n sql.close()\n sql_loc.close()\n\n" }, { "alpha_fraction": 0.4618473947048187, "alphanum_fraction": 0.4618473947048187, "avg_line_length": 15.600000381469727, "blob_id": "733853ee5972f6abcf4da3645be81b6f4c628593", "content_id": "d13848ea794d10d2b64a165bed98644ea660f99f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 29, "num_lines": 15, "path": "/databases.py", "repo_name": "MarekPas/Steemnova_stats", "src_encoding": "UTF-8", "text": "local_config = {\n 'user': '',\n 'password': '',\n 'host': 'localhost',\n 'database': 'sys',\n 'raise_on_warnings': True\n}\n\nconfig = {\n 'user': '',\n 'password': '',\n 'host': '',\n 'database': '',\n 'raise_on_warnings': True\n}\n" }, { "alpha_fraction": 0.7776427865028381, "alphanum_fraction": 0.7800729274749756, "avg_line_length": 34.78260803222656, "blob_id": "9ea9fb1f49c5b9ffd13f4396f737c4075aec1ffa", "content_id": "499f2f04082e6dbe7b983806e173dffe7cf81c9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 823, "license_type": "no_license", "max_line_length": 153, "num_lines": 23, "path": "/README.md", "repo_name": "MarekPas/Steemnova_stats", "src_encoding": "UTF-8", "text": "# Steemnova_stats\nDaily statistics for SteemNova game - https://steemnova.intinte.org/.\n\nYou can check results on:\n* https://ecency.com/@sentipl\n* https://peakd.com/@sentipl\n* https://steemit.com/@sentipl (not updating anymore).\n\n## Description\nScripts to import and analyze statistics of SteemNova players. It's a game based on Classic OGame engine.\n\n## Technologies\n* Python 3.7\n* MySQL database\n* mysql.connector library\n\n## Setup\nYou need access to Ogame database (at least SELECTS). 
Enter login details into the config dictionary in databases.py.\nCreate a local database and enter its login details into the local_config dictionary in databases.py. To create the database, use the commands from the create_databse.txt file.\nThen run import.py followed by generator.py. The post is generated into the result.txt file.\n\n## Credits\nCreated by MarekPas\n" } ]
4
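import.py above assumes a local `sn1_users` table (player id and name) plus one table per statistic (`sn1_agresor`, `sn1_bunker`, `sn1_destroyer`, `sn1_fail`, `sn1_farm`), each keyed by player id; the date-named point columns are then appended per run by its `ALTER TABLE` statements. The create_databse.txt file the README points to is not part of this snapshot, so the following is only a sketch of a schema consistent with the queries in import.py and generator.py; the column sizes and the `IF NOT EXISTS` guards are assumptions.

```python
import mysql.connector
from databases import local_config  # the same config module the scripts use

# Fixed columns only; import.py adds one date-named column per run.
DDL = [
    "CREATE TABLE IF NOT EXISTS sn1_users ("
    " id INT PRIMARY KEY,"
    " name VARCHAR(64))",
]
for stat_table in ("sn1_agresor", "sn1_bunker", "sn1_destroyer",
                   "sn1_fail", "sn1_farm"):
    DDL.append(f"CREATE TABLE IF NOT EXISTS {stat_table} (id INT PRIMARY KEY)")

if __name__ == "__main__":
    sql = mysql.connector.connect(**local_config)
    cursor = sql.cursor()
    for statement in DDL:
        cursor.execute(statement)
    sql.commit()
    sql.close()
```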
junaid1460/dirwatch
https://github.com/junaid1460/dirwatch
e1e61486eaa00a9b19f0dce758be89663722792a
2818c216768079e2646da1e2ea820bfe7991a713
70cc386c520093b7903fb615306c16f1bb6a761e
refs/heads/master
2020-03-07T13:57:13.587555
2018-03-31T08:42:53
2018-03-31T08:42:53
127,514,764
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.49886104464530945, "alphanum_fraction": 0.5626423954963684, "avg_line_length": 26.4375, "blob_id": "c93020cc37a93e41a6635ef671dc08ae48d96269", "content_id": "c0c48d1529599ce5162f437ca6b284ba2fcdc53b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 439, "license_type": "no_license", "max_line_length": 62, "num_lines": 16, "path": "/front-end/src/app/app.env.ts", "repo_name": "junaid1460/dirwatch", "src_encoding": "UTF-8", "text": "export const env = {\n firebase : {\n apiKey: 'AIzaSyBSUDOmIOPw0GqLd_bqBzrDYH4S0jXCdqo',\n authDomain: 'cgproject-631d0.firebaseapp.com',\n databaseURL: 'https://cgproject-631d0.firebaseio.com',\n projectId: 'cgproject-631d0',\n storageBucket: 'cgproject-631d0.appspot.com',\n messagingSenderId: '649844661943'\n },\n collections: {\n companies : 'companies'\n },\n app: {\n name : 'placements'\n }\n};\n" }, { "alpha_fraction": 0.7194106578826904, "alphanum_fraction": 0.7194106578826904, "avg_line_length": 25.457626342773438, "blob_id": "a05122bc8d3d4eaf3e477d292ea03828a4f304b8", "content_id": "4c9c617c4762565f1920a62616d39978a4848a40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1561, "license_type": "no_license", "max_line_length": 91, "num_lines": 59, "path": "/front-end/src/app/app.module.ts", "repo_name": "junaid1460/dirwatch", "src_encoding": "UTF-8", "text": "import { BrowserModule } from '@angular/platform-browser';\nimport { BrowserAnimationsModule} from '@angular/platform-browser/animations';\nimport { NgModule } from '@angular/core';\n\n\nimport { ServiceWorkerModule } from '@angular/service-worker';\nimport { AppComponent} from './app.component';\n\nimport { environment } from '../environments/environment';\n\nimport { MatToolbarModule, MatButtonModule, MatListModule, MatIconModule,\n MatInputModule, MatCardModule, MatDialogModule } from '@angular/material';\n\nimport { Route, RouterModule } from '@angular/router';\nimport { SubscriptionComponent , ImageDialog} from './subscription/subscription.component';\nimport { AboutComponent } from './about/about.component';\nimport { AppService} from './app.service'\nimport { HttpModule } from '@angular/http'\nconst mat = [\n MatToolbarModule, MatButtonModule, MatListModule, MatInputModule,\n MatIconModule, MatCardModule, MatDialogModule\n]\n\n\nconst routes: Route [] = [\n {\n path:'',\n component : SubscriptionComponent,\n pathMatch:'full'\n },\n {\n path: 'about',\n component: AboutComponent\n }\n]\n\n\n\n\n@NgModule({\n declarations: [\n AppComponent,\n SubscriptionComponent,\n AboutComponent,\n ImageDialog\n ],\n imports: [\n BrowserModule,\n RouterModule.forRoot(routes),\n HttpModule,\n BrowserAnimationsModule,\n ServiceWorkerModule.register('/ngsw-worker.js', { enabled: environment.production }),\n ...mat\n ],\n providers: [AppService ],\n bootstrap: [AppComponent],\n entryComponents: [ImageDialog]\n})\nexport class AppModule { }\n" }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6380952596664429, "avg_line_length": 19.60784339904785, "blob_id": "c89dda94e7b9f3cf39bafed948d499c0bb0e04de", "content_id": "5acdb35ba262ea3f6517c90f15d6b5e9ea0537df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1050, "license_type": "no_license", "max_line_length": 75, "num_lines": 51, "path": "/front-end/src/app/subscription/subscription.component.ts", "repo_name": "junaid1460/dirwatch", 
"src_encoding": "UTF-8", "text": "import { Component, OnInit, Inject } from '@angular/core';\nimport {MatDialog, MatDialogRef, MAT_DIALOG_DATA} from '@angular/material';\nimport { AppService } from '../app.service'\n@Component({\n selector: 'app-subscription',\n templateUrl: './subscription.component.html',\n styleUrls: ['./subscription.component.css']\n})\nexport class SubscriptionComponent implements OnInit {\n days:number = 1;\n add(){\n this.days += 1;\n }\n remove(){\n this.days -=1\n this.days = this.days < 1? 1:this.days;\n }\n constructor(public aps:AppService, private dl: MatDialog) { \n // aps.fetch()\n }\n xs = [0,]\n ngOnInit() {\n }\n\n show(image){\n this.dl.open(ImageDialog, {data : {\n image: image\n }})\n }\n\n}\n\n\n\n@Component({\n selector: 'image-dialog',\n template: `\n <img src=\"{{data.image}}\" style=\" max-height:80vh;max-width:60vw\"/>\n `\n})\nexport class ImageDialog {\n\n constructor(\n public dialogRef: MatDialogRef<ImageDialog>,\n @Inject(MAT_DIALOG_DATA) public data: any) { }\n\n onNoClick(): void {\n this.dialogRef.close();\n }\n\n}" }, { "alpha_fraction": 0.6733046174049377, "alphanum_fraction": 0.6797631978988647, "avg_line_length": 24.46575355529785, "blob_id": "bfcd24e8b92a6099fdc6faeb49ed720accdddfca", "content_id": "90f8d67a568d9300bc160d211ba5f0fe06111628", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1858, "license_type": "no_license", "max_line_length": 87, "num_lines": 73, "path": "/main.py", "repo_name": "junaid1460/dirwatch", "src_encoding": "UTF-8", "text": "from flask import Flask, send_from_directory, jsonify\nfrom flask import request, render_template\nimport os\nfrom glob import glob\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileCreatedEvent, FileModifiedEvent, FileSystemEventHandler\nimport random\n\n\n\napp_dir = os.path.dirname(os.path.realpath(__file__))\nmedia_dir = \"./pi/\"\nmedia_url = '/media/'\nmedia_api_url = '/pics/'\ntemplate_dir = os.path.join(app_dir , 'build/')\napp = Flask(__name__, template_folder=template_dir, static_url_path='/static/')\n\n# filechages handler\n\nmedia_files = []\nmedia_hash = None\n\n\n \n\ndef handleChanges(event):\n global media_files, media_hash\n exts = ['*.png', '*.jpg', '*.jpeg']\n media_files = []\n for ext in exts:\n media_files.extend(glob(media_dir + ext))\n media_files.sort(key = os.path.getmtime)\n media_files.reverse()\n media_files = list(map(lambda x: media_url + x.split('/')[-1], media_files))\n tmp = random.randint(100, 1000000)\n if tmp == media_hash:\n media_hash += tmp\n else:\n media_hash = tmp\n\nwith app.app_context():\n handleChanges(0)\n\nhandler = FileSystemEventHandler()\nhandler.on_created = handleChanges\nhandler.on_modified = handleChanges\nobserver = Observer()\nobserver.schedule(handler, media_dir)\nobserver.start()\n\[email protected]('/')\ndef home():\n return render_template('index.html')\n\[email protected]('/about')\ndef about():\n return render_template('index.html')\n\[email protected]('/static/<path:path>')\ndef files(path):\n # print('hello')\n return send_from_directory(template_dir, path)\n\n\[email protected](media_url + '<path:filename>')\ndef media(filename):\n return send_from_directory(media_dir, filename)\n\[email protected](media_api_url+'<path:hash>')\ndef medias(hash):\n if str(media_hash) == hash:\n return jsonify({})\n return jsonify({'hash': media_hash, 'files' : media_files})" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5622490048408508, 
"avg_line_length": 20.65217399597168, "blob_id": "396b95b2304ecff8039f2152c0544da6fdbb0e29", "content_id": "56f8d8e5090e1360f4c96afa50ff4732cde5fa9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 498, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/front-end/src/app/app.service.ts", "repo_name": "junaid1460/dirwatch", "src_encoding": "UTF-8", "text": "import { Injectable } from '@angular/core';\nimport { Http } from '@angular/http'\n\n@Injectable()\nexport class AppService {\n hash = 'random'\n files =[]\n constructor(private http: Http) { \n this.fetch()\n }\n fetch(){\n // console.log('/pics/' + this.hash)\n this.http.get('/pics/' + this.hash).subscribe(e =>{\n let tmp = e.json()\n if(tmp.hash){\n this.hash = tmp.hash\n this.files = tmp.files;\n }\n setTimeout(this.fetch.bind(this), 1000)\n \n })\n }\n}\n" } ]
5
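main.py above keeps the media list and `media_hash` fresh by assigning one callback to both the `on_created` and `on_modified` hooks of a watchdog `FileSystemEventHandler`, while the Angular `AppService` polls `/pics/<hash>` once per second and only receives a fresh file list when the hash has changed. Below is a minimal sketch of the watchdog half; the current directory stands in for the media folder, and the callback just logs instead of re-globbing.

```python
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

def on_change(event):
    # In main.py above this re-globs the media directory and bumps the hash;
    # here we only report which path triggered the event.
    print("change detected:", event.src_path)

handler = FileSystemEventHandler()
handler.on_created = on_change   # same wiring as main.py above
handler.on_modified = on_change

observer = Observer()
observer.schedule(handler, ".")  # "." is a stand-in for the media directory
observer.start()
try:
    time.sleep(5)                # watch briefly for demonstration
finally:
    observer.stop()
    observer.join()
```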
barentsen/kepler-skye
https://github.com/barentsen/kepler-skye
4d3227a767c92d316d6fc15b4d71fd4227644f2d
ed30f7b3594a6a5974d5d4636736021eceb4f880
2ab8721399cae970ea42b8906648767167e7ea5b
refs/heads/master
2021-01-11T07:10:43.247053
2017-01-10T19:09:58
2017-01-10T19:09:58
72,425,105
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7302672266960144, "alphanum_fraction": 0.7781230807304382, "avg_line_length": 29.94230842590332, "blob_id": "1f9aeafa31f37a3269ecb98efa2c8a9e86e82487", "content_id": "90dacb6aae474bbc360d3414efb24f43d8859f51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1609, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/README.md", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "# Kepler's Skye Planet Candidate Metric\n\n***Identifies suspicious clusters of transits in time and space.***\n\n## Introduction\n\nSome of the CCD's on board the planet-hunting Kepler spacecraft\nare known to be sensitive to thermal changes in the on-board electronics;\nproducing 'rolling band' artefacts which may introduce transit-like\nsignals in the lightcurves of stars.\n\nAn effective way to identify the transits that are likely due to this\ntype of artefact, is to look for clusters of transits at the same time\non the same CCD chip. This is what the so-called 'Skye metric' does\nas part of the Kepler DR25 planet candidate vetting effort.\n\nThis repository contains the scripts used to identify the (time, ccd) pairs\nduring which the Kepler pipeline detected a suspicious number of transits,\nwhere 'suspicious' is quantified by means of the inferred binomial distribution\nof the observed frequency of transits produced by reliable planet candidates.\n\n\n## Usage\n\nThe metric is implemented as a 4-step procedure, i.e. as 4 Python scripts,\nlocated in the `scripts` folder of this repository. The folder contains a\n`Makefile` which captured how the scripts were ran.\n\n\n## Output\n\nThe key output is the file called `output/ops-bin0.50-p1e-04-definition.txt`\nwhich looks like this:\n\n```\n# This file specifies the times (floored bkjd) and skygroups during which\n# an anomalous number of long-period (>50 days) transits were detected.\n#\n# bkjd skygroup\n131.50 36\n131.50 59\n132.50 56\n133.50 41\n133.50 43\n133.50 67\n134.00 43\n134.00 78\n135.00 54\netc...\n```\n\nThis file was used as input into the Kepler DR25 RoboVetter.\n" }, { "alpha_fraction": 0.6133155226707458, "alphanum_fraction": 0.6208946108818054, "avg_line_length": 49.977272033691406, "blob_id": "0df665683582085dd883aaa06c7d8f2dc7f91c02", "content_id": "c943c682b0829bea1cd0817f182c1d1d45164448", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6729, "license_type": "no_license", "max_line_length": 112, "num_lines": 132, "path": "/scripts/1-prepare-transit-tables.py", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "\"\"\"Creates a table of long-period/low-mes TCE transits detected by the Kepler pipeline.\n\nThis script takes a txt or csv table of TCEs (e.g. 
'TCEs.txt') created by the\nKepler pipeline and turns it into a table of transits, detailing the time,\nquarter, season, ccd channel, skygroup, etc of each observable transit.\nThis table of transits is intended to allow the frequency of transits as a\nfunction of time and CCD to be investigated, allowing time intervals and detectors\nto be identified that produce a spurious number of transits at a given time.\nThis has been found to be effective at identifying epochs affected by\nthermal changes in the spacecraft electronics.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n# A few useful constants\nKEPLER_BEGIN_BK, KEPLER_END_BK = 130, 1582\nKEPLER_QUARTERS = pd.read_csv('../data/kepler-quarters.csv')\n\n\ndef mjd2quarter(mjd):\n \"\"\"Returns the Kepler quarter for a given Modified Julian Day.\"\"\"\n mask = (KEPLER_QUARTERS.first_lc_mjd < mjd+0.01) & (KEPLER_QUARTERS.last_lc_mjd > mjd-0.01)\n if mask.any():\n return KEPLER_QUARTERS.loc[mask, 'quarter'].values[0]\n return None\n\ndef mjd2season(mjd):\n \"\"\"Returns the Kepler season for a given Modified Julian Day.\"\"\"\n mask = (KEPLER_QUARTERS.first_lc_mjd < mjd+0.01) & (KEPLER_QUARTERS.last_lc_mjd > mjd-0.01)\n if mask.any():\n return KEPLER_QUARTERS.loc[mask, 'season'].values[0]\n return None\n\ndef bkjd_to_mjd_approximate(bkjd):\n \"\"\"Inexact conversion from Barycentric Kepler Julian Date (BKJD) to Modified Julian Date (MJD).\n\n Inexact because it ignores the TIMECORR and TIMSLICE corrections.\n \"\"\"\n return bkjd + 2454833 - 2400000.5\n\n\ndef make_transit_table(tce_input_fn, robovetter_input_fn=None,\n min_period=50, max_mes=9e99):\n # Read the TCE table\n if tce_input_fn.endswith('txt'):\n columns = ['tce', 'kic', 'pn', 'period', 'epoch', 'mes', 'depth', 'duration',\n 'rplanet', 'rstar', 'tstar', 'logg', 'a', 'radratio', 'arstar', 'snr', 'srad']\n tcedf_tmp = pd.read_fwf(tce_input_fn, comment='#', names=columns)\n else:\n tcedf_tmp = pd.read_csv(tce_input_fn)\n\n if robovetter_input_fn is None:\n tcedf = tcedf_tmp\n #tcedf['disposition'] = [None] * len(tcedf)\n else:\n # Add preliminary robovetter output to have best-effort dispositions\n robovetter_columns = ['tce', 'score', 'disposition', 'not_transit_like_flag',\n 'significant_secondary_flag', 'centroid_offset_flag',\n 'ephemeris_match_flag', 'minor_descriptive_flag']\n robovetterdf = pd.read_fwf(robovetter_input_fn, comment='#', names=robovetter_columns)\n\n # Both tcedf_tmp and robovetterdf should have the same number of TCEs\n tcedf = pd.merge(tcedf_tmp, robovetterdf, on='tce')\n # Sanity checks\n #assert(len(tcedf_tmp) == len(robovetterdf))\n #assert(len(tcedf) == len(tcedf_tmp))\n\n # Convert the catalog of TCEs into a catalog of LONG-PERIOD TRANSITS\n mask = (tcedf.period > min_period) & (tcedf.mes < max_mes)\n print('Selected {} out of {} TCEs using period and mes cut.'.format(mask.sum(), len(tcedf)))\n transitrows = []\n for mytce in tqdm(tcedf[mask].itertuples(), desc='Identifying transits from {}'.format(tce_input_fn)):\n mytime = mytce.epoch\n while mytime < KEPLER_END_BK:\n mjd = bkjd_to_mjd_approximate(mytime)\n newrow = {'transit_time': mytime,\n 'transit_time_mjd': mjd,\n 'tce': mytce.tce,\n 'kic': mytce.kic,\n 'period': mytce.period,\n 'mes': mytce.mes,\n 'disposition': mytce.disposition,\n 'not_transit_like_flag': mytce.not_transit_like_flag,\n 'significant_secondary_flag': mytce.significant_secondary_flag,\n 'centroid_offset_flag': mytce.centroid_offset_flag,\n 'ephemeris_match_flag': mytce.ephemeris_match_flag,\n 
'minor_descriptive_flag': mytce.minor_descriptive_flag,\n 'quarter': mjd2quarter(mjd),\n 'season': mjd2season(mjd)}\n if newrow['quarter'] is not None: # Ignore unobserved transits (falling between quarter boundaries)\n transitrows.append(newrow)\n mytime += mytce.period\n transits = pd.DataFrame(transitrows)\n print('Found {} long-period transits in {}'.format(len(transits), tce_input_fn))\n\n # Add the CCD channel number and mod/out for each transit\n ccdinfo = pd.read_csv('../data/kepler-ccd-info-by-kic.csv')\n # This join will remove unobserved transits, i.e. transits in a quarter on a dead module\n transits_with_ccdinfo = pd.merge(transits, ccdinfo,\n left_on=['kic', 'quarter'],\n right_on=['sci_kepler_id', 'sci_data_quarter'])\n assert((transits_with_ccdinfo.season == transits_with_ccdinfo.sci_season).all()) # Sanity check\n\n print('Found {} observed long-period transits'.format(len(transits_with_ccdinfo)))\n transits_with_ccdinfo.rename(columns={'sci_skygroup_id': 'skygroup',\n 'sci_channel': 'channel',\n 'sci_module': 'module',\n 'sci_output': 'output'},\n inplace=True)\n export_columns = ['tce', 'kic', 'transit_time', 'period', 'mes', 'disposition',\n 'not_transit_like_flag', 'significant_secondary_flag', 'centroid_offset_flag',\n 'ephemeris_match_flag', 'minor_descriptive_flag',\n 'quarter', 'season', 'skygroup', 'channel', 'module', 'output']\n return transits_with_ccdinfo[export_columns]\n\n\nif __name__ == '__main__':\n # Real 'OPS' TCE detection results\n ops = make_transit_table('../data/OPS-TCEs.txt', robovetter_input_fn='../data/RoboVetterOut-OPS.txt')\n ops.to_csv('intermediate-output/ops-tces-transits.csv', index=False)\n\n # Inversion run:\n # We use the csv table prepared in the 'step 0' script, which contains the\n # TCEs detected in the inverted data plus reliable OPS planet candidates.\n inv = make_transit_table('intermediate-output/inv-tces-for-skye.csv')\n inv.to_csv('intermediate-output/inv-tces-transits.csv', index=False)\n\n # Season scrambling run:\n # Again, we use the csv table prepared by the step 0 script.\n ss1 = make_transit_table('intermediate-output/ss1-tces-for-skye.csv')\n ss1.to_csv('intermediate-output/ss1-tces-transits.csv', index=False)\n" }, { "alpha_fraction": 0.7678571343421936, "alphanum_fraction": 0.7916666865348816, "avg_line_length": 32.400001525878906, "blob_id": "05803c57ce64f397a94bd3580c0362e57ac25930", "content_id": "00a9b66ef097f41d8e59b5a0833dcf9df4719ba6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 168, "license_type": "no_license", "max_line_length": 45, "num_lines": 5, "path": "/scripts/Makefile", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "\nall:\n\tpython 0-prepare-tce-table-for-inv-and-ss.py\n\tpython 1-prepare-transit-tables.py\n\tpython 2-determine-transit-rates.py\n\tpython 3-determine-binomial-thresholds.py\n" }, { "alpha_fraction": 0.7329803109169006, "alphanum_fraction": 0.7496218085289001, "avg_line_length": 37.882354736328125, "blob_id": "00a6dfe7b15cbb4a3b4416134872460358cd35a2", "content_id": "e201a7647891fa9f29191b2e9c56de706b7f889a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1322, "license_type": "no_license", "max_line_length": 92, "num_lines": 34, "path": "/data/README.md", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "Skye input data\n===============\n\nThis directory contains a copy of the output of the Kepler planet 
detection\npipeline, version DR25, copied on Oct 25, 2017.\n\nThe pipeline was run on different sets of lightcurves to assess completeness\nand reliability. These runs include actual Kepler lightcurves ('OPS'),\ninverted lightcurves ('INV'), and season-scrambled lightcurves ('SS1').\n\nThe TCEs detected by these different runs are located in this directory\nunder the following names:\n\n* OPS-TCEs.txt (copied from /soc/nfs/so-nfs/DR25/OPS/DATA/TCEs.txt)\n* INV-TCEs.txt (copied from /soc/nfs/so-nfs/DR25/INV/DATA/TCEs.txt)\n* SS1-TCEs.txt (copied from /soc/nfs/so-nfs/DR25/SS1/DATA/TCEs.txt)\n\nAnd we also need the latest RoboVetter output:\n\n* RoboVetterOut-SS1.txt (copied from /soc/nfs/so-nfs/DR25/SS1/RoboVet/RoboVetterOut-SS1.txt)\n\nThe table called `kepler-ccd-info-by-kic.csv` was created using the\nfollowing query at MAST/CasJobs:\n\n SELECT\n sci_kepler_id, sci_data_quarter, sci_season, sci_skygroup_id,\n sci_module, sci_channel, sci_output\n INTO mydb.kepler_ccd_info\n FROM kepler.kepler_science\n WHERE sci_archive_class = 'TPL';\n\nFinally, the table `kepler-quarters.csv` details the quarter numbers\nand time intervals for all Kepler Quarters. \nIt was created by hand based on the data release notes.\n" }, { "alpha_fraction": 0.6325063109397888, "alphanum_fraction": 0.6411362886428833, "avg_line_length": 46.94827651977539, "blob_id": "a9c3c910a76be5f1893849477cbceefbf9873abe", "content_id": "d42e9428b9eaa8259308691463d1689e7d0aacc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2781, "license_type": "no_license", "max_line_length": 99, "num_lines": 58, "path": "/scripts/0-prepare-tce-table-for-inv-and-ss.py", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "\"\"\"\nTakes the sample of TCEs detected in the INV (inversion) and\nSS1 (season scrambling) runs and add the sample of 'reliable' TCEs\nfrom the OPS (i.e. real data) run to them. 
The purpose of doing this is\nto obtain a TCE sample to which the Skye false-positive identification\nalgorithm can meaningfully be applied.\n\"\"\"\n\nimport pandas as pd\n\nTCE_TABLE_COLUMNS = ['tce', 'kic', 'pn', 'period', 'epoch', 'mes', 'depth', 'duration',\n 'rplanet', 'rstar', 'tstar', 'logg', 'a', 'radratio', 'arstar', 'snr', 'srad']\nROBOVETTER_COLUMNS = ['tce', 'score', 'disposition', 'not_transit_like_flag',\n 'significant_secondary_flag', 'centroid_offset_flag',\n 'ephemeris_match_flag', 'minor_descriptive_flag']\n\n\ndef xmatch_robovetter(tce_input_fn, robovetter_input_fn):\n \"\"\"Adds the columns from the robovetter output to the TCE list.\"\"\"\n tce_df = pd.read_fwf(tce_input_fn, comment='#', names=TCE_TABLE_COLUMNS)\n robovetter_df = pd.read_fwf(robovetter_input_fn, comment='#', names=ROBOVETTER_COLUMNS)\n xmatch_df = pd.merge(tce_df, robovetter_df, on='tce', how='inner')\n # Both tcedf_tmp and robovetterdf should have the same number of TCEs\n assert(len(xmatch_df) == len(robovetter_df)) # sanity check\n return xmatch_df\n\n\ndef xmatch_clean(tce_df, clean_input_fn):\n \"\"\"Only use the TCEs which are in the 'clean' table.\"\"\"\n clean_df = pd.read_csv(clean_input_fn)\n xmatch_df = pd.merge(tce_df, clean_df, on='tce', how='inner')\n # Sanity check: are SS1-TCEs.txt and the clean table aligned?\n #assert(len(df_clean) == len(tce_df))\n #assert((df_clean.tce == tce_df.tce).sum() == len(df_clean))\n # Make a new dataframe containing only the TCEs to keep\n return xmatch_df[xmatch_df.keep]\n\n\ndef add_real_planet_candidates(tce_df):\n \"\"\"Take a sample of TCEs and add reliable OPS TCEs to them.\"\"\"\n ops_df = xmatch_robovetter('../data/OPS-TCEs.txt', '../data/RoboVetterOut-OPS.txt')\n planet_candidates_mask = (\n (ops_df.not_transit_like_flag == 0) &\n (ops_df.ephemeris_match_flag == 0)\n )\n return pd.concat((tce_df, ops_df[planet_candidates_mask]))\n\n\nif __name__ == '__main__':\n df = xmatch_robovetter('../data/SS1-TCEs.txt', '../data/RoboVetterOut-SS1.txt')\n df = xmatch_clean(df, '../data/ss1TCEClean-900day-7mes-Dec2016.csv')\n df = add_real_planet_candidates(df)\n df.to_csv('intermediate-output/ss1-tces-for-skye.csv')\n\n df = xmatch_robovetter('../data/INV-TCEs.txt', '../data/RoboVetterOut-INV.txt')\n df = xmatch_clean(df, '../data/invTCEClean-100day-9mes-Nov2016-no-header.csv')\n df = add_real_planet_candidates(df)\n df.to_csv('intermediate-output/inv-tces-for-skye.csv')\n" }, { "alpha_fraction": 0.5868489742279053, "alphanum_fraction": 0.5975676774978638, "avg_line_length": 51.5415153503418, "blob_id": "225196ba2a4149823086945c5e5d46bd518b3e64", "content_id": "ceb7d3838fa041a0cde6bc3aceebdf7accb31c76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14554, "license_type": "no_license", "max_line_length": 188, "num_lines": 277, "path": "/scripts/3-determine-binomial-thresholds.py", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "\"\"\"This final step identifies the (epoch, skygroup) pairs that show a\n'suspiciously large' number of transits from long-period/low-mes TCEs;\nsuch clusters of transits are indicative of noisy data.\n\nThe term 'suspiciously large' is quantified by means of a probability\nto see the given number of transits, as inferred from the appropriate\nbinomial distribution.\n\"\"\"\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import binom, poisson\nfrom tqdm import tqdm\n\n\nKEPLER_BEGIN_BK, KEPLER_END_BK = 130, 1582\n\n# Exclude the channels 
below from the statistics; these are known to show\n# a large spurious population of TCEs due to rolling band artefacts.\nOUTLIER_CHANNELS = [26, 44, 58]\n\n\nclass SkyeMetric(object):\n \"\"\"This class reads in transit data and identifies (epoch, skygroup) pairs\n that show a suspiciously large number of transits.\"\"\"\n\n def __init__(self, transit_table_fn, rates_table_fn, binsize=1.,\n probability_threshold=1e-3, probability_threshold_combined=None):\n if probability_threshold_combined is None:\n probability_threshold_combined = probability_threshold\n print('Reading {} and {}'.format(transit_table_fn, rates_table_fn))\n self.transits = pd.read_csv(transit_table_fn)\n self.rates = pd.read_csv(rates_table_fn, index_col=['skygroup', 'season'])\n self.binsize = binsize\n self.probability_threshold = probability_threshold\n self.probability_threshold_combined = probability_threshold_combined\n\n self.transits['bin_id'] = binsize * np.floor(self.transits.transit_time / binsize)\n self._compute()\n\n def _compute(self):\n self.thresholds = self._compute_binomial_thresholds()\n self.bad_bins = self._compute_bad_bins()\n self.bad_bin_ids_all_channels = self._compute_bad_bins_all_channels()\n\n def _compute_binomial_thresholds(self):\n rates = self.rates\n tce_expect_col, rate_expected_col, rate_threshold_col = [], [], []\n for skygroup, season in rates.index:\n mask_reference = ((rates.index.get_level_values('skygroup') == skygroup) &\n (rates.index.get_level_values('season') != season) &\n ~rates.channel.isin(OUTLIER_CHANNELS))\n\n # Compute the probability for a TCE to produce a transit in a given bin\n n_transits_per_bin = self.binsize * rates[mask_reference].transits_per_day\n n_tces = rates[mask_reference].n_tces\n mean_transit_probability = (n_transits_per_bin / n_tces).mean()\n\n rate_expected_col.append(n_transits_per_bin.median())\n tce_expect_col.append(n_tces.median())\n rate_threshold_col.append(int(binom.ppf(1 - self.probability_threshold,\n int(n_tces.median()),\n mean_transit_probability)))\n\n rates['n_tces_expected'] = tce_expect_col\n rates['transit_rate_expected'] = rate_expected_col\n rates['transit_rate_threshold'] = rate_threshold_col\n return rates\n\n def _compute_bad_bins(self):\n transit_rate_groupby = self.transits.groupby(['bin_id', 'skygroup', 'season'])\n transit_rate = pd.DataFrame(transit_rate_groupby.size(), columns=['transit_rate_observed'])\n transit_rate['channel'] = transit_rate_groupby.first().channel\n transit_rate['quarter'] = transit_rate_groupby.first().quarter\n assert((transit_rate_groupby.first().channel == transit_rate_groupby.mean().channel).all())\n assert((transit_rate_groupby.first().quarter == transit_rate_groupby.mean().quarter).all())\n\n threshold_col = []\n for bin_id, skygroup, season in transit_rate.index:\n threshold_col.append(self.thresholds.ix[skygroup, season].transit_rate_threshold)\n transit_rate['transit_rate_threshold'] = threshold_col\n\n # Identify the bad bins\n mask_bad_bins = transit_rate.transit_rate_observed > transit_rate['transit_rate_threshold']\n print('Identified {} bad bins for single channels.'.format(mask_bad_bins.sum()))\n\n mask_transits_in_bad_bins = self.transits.channel > 999 # All False\n for bin_id, skygroup, season in transit_rate[mask_bad_bins].index:\n mask_transits_in_bad_bins |= (\n (self.transits.bin_id == bin_id) &\n (self.transits.skygroup == skygroup) &\n (self.transits.season == season)\n )\n self.mask_transits_in_bad_bins = mask_transits_in_bad_bins\n print('Flagged {} out of {} transits as 
suspicious.'.format(mask_transits_in_bad_bins.sum(),\n len(mask_transits_in_bad_bins)))\n\n return transit_rate[mask_bad_bins]\n\n def _compute_bad_bins_all_channels(self):\n tces_to_remove = self.transits[self.mask_transits_in_bad_bins].tce.unique()\n mask_transits_to_remove = self.transits.tce.isin(tces_to_remove)\n\n # Make a cut across all channels\n total_transit_count = self.transits[~mask_transits_to_remove].groupby('bin_id').size()\n n_tces = self.transits[~mask_transits_to_remove].tce.unique().size\n p_transit = total_transit_count.median() / n_tces\n total_count_threshold = binom.ppf(1 - self.probability_threshold_combined,\n int(n_tces),\n p_transit)\n # print('n={} p={}'.format(n_tces, p_transit))\n\n bins_to_remove = total_transit_count[total_transit_count > total_count_threshold].index\n print('Identified {} bad bins for all channels.'.format(len(bins_to_remove)))\n\n self.mask_transits_in_bad_bin_ids = self.transits['bin_id'].isin(bins_to_remove)\n print('Flagged {} out of {} transits as suspicious.'.format(self.mask_transits_in_bad_bin_ids.sum(),\n len(self.mask_transits_in_bad_bin_ids)))\n\n\n return bins_to_remove\n\n def percent_removed(self):\n \"\"\"Returns the fraction of the total Kepler data set removed by the metric.\"\"\"\n n_bins_total = len(self.transits['bin_id'].unique()) * 80 # assume 80 channels\n n_bins_removed = len(self.bad_bins) + 80 * len(self.bad_bin_ids_all_channels)\n return 100 * n_bins_removed / n_bins_total\n\n def print_summary(self):\n print('Number of bad bins per channel:')\n print(self.bad_bins.reset_index().groupby('channel').size().sort_values(ascending=False).head(10))\n print('Metric removes {:.1f}% of data.'.format(self.percent_removed()))\n\n\n def write_definition(self, output_fn):\n \"\"\"Produces a definition file listing all flagged time bins.\"\"\"\n print('Writing skye metric definition to {}'.format(output_fn))\n with open(output_fn, 'w') as out:\n out.write('# This file specifies the times (floored bkjd) and skygroups during which\\n')\n out.write('# an anomalous number of long-period (>50 days) transits were detected.\\n')\n out.write('#\\n')\n out.write('# bkjd skygroup\\n')\n for row in self.bad_bins.reset_index().itertuples():\n out.write('{:.2f} {}\\n'.format(row.bin_id, row.skygroup))\n for bin_id in self.bad_bin_ids_all_channels:\n for channel in range(1, 85):\n out.write('{} {}\\n'.format(bin_id, channel))\n\n\n def write_definition_transits(self, output_fn):\n \"\"\"Produces a definition file listing all flagged transits rather than time bins.\"\"\"\n print('Writing skye metric definition to {}'.format(output_fn))\n with open(output_fn, 'w') as out:\n out.write('# This file specifies the transits flagged by Geert\\'s skye as unreliable.\\n')\n out.write('#\\n')\n out.write('# tce transit_time_bkjd flag\\n')\n for row in self.transits[self.mask_transits_in_bad_bins].itertuples():\n out.write('{} {:.10f} 1\\n'.format(row.tce, row.transit_time))\n \"\"\"\n for row in self.bad_bins.reset_index().itertuples():\n out.write('{:.2f} {}\\n'.format(row.bin_id, row.skygroup))\n # Flag transits that fell on the given skygroup \n # the time interval [bkjd_start, bkjd_start + binsize]\n bkjd_start = row.bin_id\n mask_flagged_transits = (self.transits.skygroup == row.skygroup\n self.transits.transit_time >= bkjd_start\n self.transits.transit_time < (bkjd_start + self.binsize)\n \"\"\" \n #for bin_id in self.bad_bin_ids_all_channels:\n # for channel in range(1, 85):\n # out.write('{} {}\\n'.format(bin_id, channel))\n\n def 
write_definition_transits2(self, output_fn):\n \"\"\"Alternative implementation of the above to verify correctness.\"\"\"\n print('Writing skye metric definition to {}'.format(output_fn))\n with open(output_fn, 'w') as out:\n out.write('# This file specifies the transits flagged by Geert\\'s skye as unreliable.\\n')\n out.write('#\\n')\n out.write('# tce transit_time_bkjd flag\\n')\n #for row in self.transits[self.mask_transits_in_bad_bins].itertuples():\n # out.write('{} {:.8f} 1\\n'.format(row.tce, row.transit_time))\n for row in self.bad_bins.reset_index().itertuples():\n # Flag transits that fell on the given skygroup \n # the time interval [bkjd_start, bkjd_start + binsize]\n bkjd_start = row.bin_id\n mask_flagged_transits = (\n (self.transits.skygroup == row.skygroup) &\n (self.transits.transit_time >= bkjd_start) &\n (self.transits.transit_time < (bkjd_start + self.binsize))\n )\n for row in self.transits[mask_flagged_transits].itertuples():\n out.write('{} {:.10f} 1\\n'.format(row.tce, row.transit_time))\n #for bin_id in self.bad_bin_ids_all_channels:\n # for channel in range(1, 85):\n # out.write('{} {}\\n'.format(bin_id, channel))\n\n\n\n def plot(self, output_fn):\n mask_transits_flagged = self.mask_transits_in_bad_bins | self.mask_transits_in_bad_bin_ids\n tces_to_remove = self.transits[mask_transits_flagged].tce.unique()\n mask_transits_to_remove = self.transits.tce.isin(tces_to_remove)\n print('Transits directly flagged: {}'.format(mask_transits_flagged.sum()))\n print('Transits affected: {}'.format(mask_transits_to_remove.sum()))\n\n\n pc_before = (self.transits.groupby('tce').first().disposition == 'PC').sum()\n pc_after = (self.transits[~mask_transits_to_remove].groupby('tce').first().disposition == 'PC').sum()\n print('Retain {} out of {} PCs.'.format(pc_after, pc_before))\n\n q10, q50, q90 = np.percentile(self.transits[~mask_transits_to_remove].groupby('tce').first().mes, [10, 50, 90])\n print('mes removed [{}, {}, {}]'.format(q10, q50, q90))\n\n import matplotlib.pyplot as pl\n from matplotlib import rcParams\n rcParams[\"savefig.dpi\"] = 100\n import seaborn as sns\n print('Writing {}'.format(output_fn))\n pl.figure(figsize=(10, 7))\n pl.subplot(211)\n _ = pl.hist(self.transits.groupby('tce').first().period,\n bins=100, label='TCEs considered', facecolor='red')\n _ = pl.hist(self.transits[~mask_transits_to_remove].groupby('tce').first().period,\n bins=100, label='Not affected by cut', facecolor='blue')\n pl.xlabel('Period [days]', fontsize=14)\n pl.legend()\n pl.title('Binomial threshold (binsize={}, p={})'.format(self.binsize, self.probability_threshold), fontsize=18)\n #pl.text(700, 400, 'Only showing\\nperiod > 100 days\\nmax_mult_ev < 20', ha='left')\n pl.text(600, 400, 'Removes {:.1f}% of data\\nRetains {} out of {} transit-like TCEs\\nMedian MES removed: {:.1f}'.format(self.percent_removed(), pc_after, pc_before, q50), ha='left')\n\n pl.subplot(212)\n _ = pl.hist(self.transits.transit_time,\n lw=0, facecolor='red', label='All long-period/low-mes transits',\n bins=(KEPLER_END_BK - KEPLER_BEGIN_BK),\n range=(KEPLER_BEGIN_BK, KEPLER_END_BK))\n _ = pl.hist(self.transits[~mask_transits_flagged].transit_time,\n lw=0, facecolor='blue', label='Transits deemed ok',\n bins=(KEPLER_END_BK - KEPLER_BEGIN_BK),\n range=(KEPLER_BEGIN_BK, KEPLER_END_BK))\n pl.ylim([0, 200])\n pl.xlim([KEPLER_BEGIN_BK, KEPLER_END_BK])\n pl.legend()\n pl.xlabel('Transit time', fontsize=14)\n #pl.text(1390, 120, 'Only showing\\nperiod > 100 days\\nmax_mult_ev < 20', ha='left')\n pl.tight_layout()\n 
pl.savefig(output_fn)\n pl.close()\n\n\ndef run_skye(prefix, binsize=0.5, p_threshold=1e-6, p_threshold_global=1e-6):\n output_prefix = 'output/{}-bin{:.2f}-p{:.0e}'.format(prefix, binsize, p_threshold)\n transit_table_fn = 'intermediate-output/' + prefix + '-tces-transits.csv'\n #transit_rates_fn = 'intermediate-output/' + prefix + '-tces-transit-rates.csv'\n transit_rates_fn = 'intermediate-output/ops-tces-transit-rates.csv'\n skye = SkyeMetric(transit_table_fn,\n transit_rates_fn,\n binsize=binsize,\n probability_threshold=p_threshold,\n probability_threshold_combined=p_threshold_global)\n skye.print_summary()\n skye.write_definition(output_prefix + '-definition.txt')\n skye.write_definition_transits(output_prefix + '-definition-transits.txt')\n skye.write_definition_transits2(output_prefix + '-definition-transits2.txt')\n skye.plot(output_prefix + '-skye.png')\n skye.thresholds.to_excel(output_prefix + '-thresholds.xls')\n skye.bad_bins.to_excel(output_prefix + '-badbins.xls')\n return skye\n\n\nif __name__ == '__main__':\n\n for prefix in ['ops', 'inv', 'ss1']:\n for binsize in [0.5]:\n for p_threshold in [1e-4, 5e-4]:\n skye = run_skye(prefix,\n binsize=binsize,\n p_threshold=p_threshold,\n p_threshold_global=1e99)\n" }, { "alpha_fraction": 0.6132580041885376, "alphanum_fraction": 0.6373364925384521, "avg_line_length": 39.0476188659668, "blob_id": "55075e625eec68e28b42dad06ec4d970cb86ef1c", "content_id": "2ea44407127b125df438fd6b64202f75621ac8bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3364, "license_type": "no_license", "max_line_length": 111, "num_lines": 84, "path": "/scripts/2-determine-transit-rates.py", "repo_name": "barentsen/kepler-skye", "src_encoding": "UTF-8", "text": "\"\"\"Produces a table detailing the frequency of transits observed for\nlong-period/low-mes TCEs as a function of skygroup and season.\nThis table will help us identify suspicious peaks in the number of transits\nat a given epoch/skygroup in the next step.\"\"\"\nimport pandas as pd\nfrom tqdm import tqdm\n\n# The list of quarters in each season\nSEASON_QUARTERS = {\n 0: [2, 6, 10, 14],\n 1: [3, 7, 11, 15],\n 2: [4, 8, 12, 16],\n 3: [1, 5, 9, 13, 17]\n }\n\n\ndef count_days_per_season():\n \"\"\"Returns a pandas dataframe specifying the number of days per season.\n\n The returned dataframe should look like this:\n\n n_days\n season\n 0 369.398637\n 1 373.424100\n 2 325.282400\n 3 347.636795\n \"\"\"\n KEPLER_QUARTERS = pd.read_csv('../data/kepler-quarters.csv')[1:]\n DAYS_PER_QUARTER = KEPLER_QUARTERS.last_lc_mjd - KEPLER_QUARTERS.first_lc_mjd\n seasons = [0, 1, 2, 3]\n n_days = []\n for season in seasons:\n quarters = SEASON_QUARTERS[season]\n n_days.append(sum([DAYS_PER_QUARTER.ix[q] for q in quarters]))\n df_n_days = pd.DataFrame({'n_days': n_days}, index=seasons)\n df_n_days.index.name = 'season'\n return df_n_days\n\n\ndef get_transit_rates(transit_table_fn, planet_candidates_only=True):\n \"\"\"Returns a pandas dataframe detailing the observed frequency of transits\n grouped by skygroup and season.\n \"\"\"\n # Read the list of transits\n transits = pd.read_csv(transit_table_fn)\n if planet_candidates_only:\n planet_candidates_mask = (\n (transits.not_transit_like_flag == 0) &\n (transits.ephemeris_match_flag == 0)\n )\n transits = transits[planet_candidates_mask]\n\n # Group the transits by skygroup and season and count their number;\n # store the counts in a dataframe\n groupby = transits.groupby(['skygroup', 'season'])\n 
df_n_transits = pd.DataFrame(groupby.size(), columns=['n_transits'])\n df_n_transits['n_tces'] = groupby.tce.nunique() # number of unique TCEs\n\n # To determine the number of transits per day for a given season,\n # we first need to count the number of days contained in each season.\n df_n_days = count_days_per_season()\n # How many transits per day are in each skygroup/season?\n df_rates = df_n_transits.merge(df_n_days, left_index=True, right_index=True)\n df_rates['transits_per_day'] = df_rates.n_transits / df_rates.n_days\n\n # Add the channel number\n channel = []\n for skygroup, season in df_rates.index:\n channel.append(transits[(transits.skygroup == skygroup) & (transits.season == season)].channel.iloc[0])\n df_rates['channel'] = channel\n\n return df_rates\n\n\nif __name__ == '__main__':\n ops = get_transit_rates('intermediate-output/ops-tces-transits.csv', planet_candidates_only=True)\n ops.to_csv('intermediate-output/ops-tces-transit-rates.csv')\n\n inv = get_transit_rates('intermediate-output/inv-tces-transits.csv', planet_candidates_only=True)\n inv.to_csv('intermediate-output/inv-tces-transit-rates.csv')\n\n ss1 = get_transit_rates('intermediate-output/ss1-tces-transits.csv', planet_candidates_only=True)\n ss1.to_csv('intermediate-output/ss1-tces-transit-rates.csv')\n" } ]
7
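The kepler-skye scripts above flag a (time, skygroup) bin as bad when its transit count exceeds a binomial threshold (the binom.ppf call in 3-determine-binomial-thresholds.py). A minimal standalone sketch of that test follows; the counts below are illustrative placeholders, not values from the Kepler run:

from scipy.stats import binom

n_tces = 2000        # hypothetical number of TCEs contributing transits
p_transit = 0.0005   # hypothetical per-TCE probability of a transit landing in one bin
threshold = binom.ppf(1 - 1e-4, n_tces, p_transit)   # same call pattern as the script
observed = 8         # hypothetical transit count in one (time, skygroup) bin
if observed > threshold:
    print('flag bin: %d transits exceeds threshold %.0f' % (observed, threshold))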
gpthimble/Thremal-Printer
https://github.com/gpthimble/Thremal-Printer
81a8333ab6f859f1c8d603c0fc06a9cb48435fd6
df42d1603683697e84aa359eb97caafbeeb1ae7b
8778e2d677ea67b89e4e41d405fb63df0fdf17ab
refs/heads/master
2021-01-11T21:38:31.117252
2017-01-13T09:25:32
2017-01-13T09:25:32
78,826,984
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.5369036197662354, "alphanum_fraction": 0.5548457503318787, "avg_line_length": 31.409690856933594, "blob_id": "6a174ef281919c1d1645e4dad5b80dbfb585c2d0", "content_id": "3d9ff1301fea585c131cb256df4c8a3f41343469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7357, "license_type": "no_license", "max_line_length": 105, "num_lines": 227, "path": "/printer_driver.py", "repo_name": "gpthimble/Thremal-Printer", "src_encoding": "UTF-8", "text": "from PIL import Image,ImageDraw,ImageFont\nimport sys,getopt,serial\ntextsize=40\nprinterlenth=384\ntotalpoints=textsize*printerlenth\ntotalbytes=totalpoints//8\nbytesperline=printerlenth//8\nfontname='times.ttf'\ntext=''\nport='null'\nalignX=0\nalignY=0\nim=Image.new('L',(printerlenth,textsize),color=0)\ndraw=ImageDraw.Draw(im)\nfont=ImageFont.truetype(font=fontname,size=int(textsize*0.8))\ndraw.text((alignX,alignY),text,fill=15,font=font)\nim_data_list=list(im.getdata())\n\n\ndef get_bit(byteval,idx):\n\n return ((byteval&(1<<idx))!=0);\n\ndef set_bit(v, index, x):\n \"\"\"Set the index:th bit of v to x, and return the new value.\"\"\"\n mask = 1 << index\n v &= ~mask\n if x:\n v |= mask\n return v\n\ndef get_line(line):\n\n return im_data_list[(line)*printerlenth:(line+1)*printerlenth];\n\ndef get_point(line , row):\n\n return get_line(line)[row];\n\ndef get_sliced_point(line,row):\n\n return get_sliced_line(line)[row]\n\ndef slice_point(point):\n\n return [get_bit(point,3),get_bit(point,2),get_bit(point,1),get_bit(point,0)]\n\ndef get_sliced_data(data):\n sliced_data= []\n to_be_sliced_data=data\n for points in to_be_sliced_data:\n sliced_data.append(slice_point(points));\n return sliced_data\n\ndef get_sliced_line(line):\n\n return sliced_data[(line)*printerlenth:(line+1)*printerlenth];\n\ndef get_bytes(sliced):\n data_bytes=[]\n for byte in range(0,int(totalbytes)):\n num1=0\n num2=0\n num3=0\n num4=0\n for num in range(0,8):\n num1=set_bit(num1,7-num,sliced[(byte)*8+num][0])\n num2=set_bit(num2,7-num,sliced[(byte)*8+num][1])\n num3=set_bit(num3,7-num,sliced[(byte)*8+num][2])\n num4=set_bit(num4,7-num,sliced[(byte)*8+num][3])\n data_bytes.append([num1,num2,num3,num4])\n return data_bytes\n\ndef get_bytes_line(line):\n\n return(data_in_bytes[line*48:(line+1)*48])\n\ndef show_help():\n print('printer driver help')\n print('-h --help :show this message')\n print('-s <textsize> --size <textsize> :Set text size,Default is 40')\n print('-f <\\'fontname\\'> --font <\\'fontname\\'> :set font style Default is times.ttf')\n print('-p <\\'port\\'> --port <\\'port\\'> :set serial port numbner,eg:COM4')\n print('-t <\\'text\\'> --text <\\'text\\'> :texts to be printed.Default is \\'hello world\\'') \n\ndef error_occured():\n print('error occured.')\n sys.exit(2) \n\nsliced_data=get_sliced_data(im_data_list)\ndata_in_bytes=get_bytes(sliced_data)\n\ndef main(argv):\n #print(sys.argv)\n global port\n global textsize\n global printerlenth\n global totalpoints\n global totalbytes\n global bytesperline\n global fontname\n global text\n global im\n global draw\n global font\n global im_data_list\n global sliced_data\n global data_in_bytes\n global alignX,alignY\n #deal with interface.\n try:\n opts,args=getopt.getopt(argv,\"hs:f:p:t:X:Y:\",[\"help\",\"size=\",\"font=\",\"port=\",\"text=\"])\n except getopt.GetoptError:\n print('Can\\'t find option :',str(sys.argv[1:]))\n show_help()\n sys.exit(2)\n if len(sys.argv) == 1:\n show_help()\n sys.exit(2)\n for opt,arg in opts:\n if opt in 
('-h','--help'):\n            show_help()\n            sys.exit()\n        elif opt in ('-s','--size'):\n            textsize=int(arg)\n        elif opt in ('-f','--font'):\n            fontname=arg\n        elif opt in ('-p','--port'):\n            port=arg\n        elif opt in ('-t','--text'):\n            text=arg\n        elif opt in ('-X'):\n        \talignX=int(arg)\n        elif opt in ('-Y'):\n        \talignY=int(arg)\n    if port=='null':\n        print('please use -p(--port) to set correct serial port number.')\n        sys.exit(2)\n    totalpoints=textsize*printerlenth\n    totalbytes=totalpoints//8\n    bytesperline=printerlenth//8\n    im=Image.new('L',(printerlenth,textsize),color=0)\n    draw=ImageDraw.Draw(im)\n    try:font=ImageFont.truetype(font=fontname,size=int(textsize*0.8))\n    except BaseException:\n    \tprint('Error occurred:')\n    \tprint('Unable to locate font file:',fontname,',use default instead.')\n    \tfont=ImageFont.truetype(font='simsun.ttc',size=int(textsize*0.8))\n    draw.text((alignX,alignY),text,fill=15,font=font)\n    im_data_list=list(im.getdata())\n    sliced_data=get_sliced_data(im_data_list)\n    data_in_bytes=get_bytes(sliced_data)\n    #setting up serial port\n    uart=serial.Serial()\n    uart.baudrate=115200\n    uart.port=port\n    uart.timeout=1\n    try : uart.open()\n    except\tBaseException:\n    \tprint('Error occurred:')\n    \tprint('Unable to open Serial Port:',port)\n    \tsys.exit(2)\n    else:print ('Serial Port',port,'is opened')\n    #begin communication cycle\n    state = 1\n    tries = 0\n    while 1 :\n        #1st hand shaking with the printer\n        if state ==1:\n            if tries < 10:\n                tries = tries +1\n                print('trying to connect the printer in',tries,'attempts')\n                uart.write(bytes([textsize]))\n                if uart.read() != bytes([textsize]) : \n                    state = 1\n                else :\n                    state = 2\n                    break\n\n            else:\n                print('communication error occurred.')\n                sys.exit(2)\n    \n    if state == 2:\n        print('connected, sending data')\n        for line in range(0,textsize):\n            line_waiting_for_sending=get_bytes_line(line)\n            for byte in range(0,bytesperline):\n                uart.write(bytes([line_waiting_for_sending[byte][0]]))\n            #print(uart.read())    #for test usage\n            print('scale0 sent, waiting for acknowledgement')\n            #uart.write(b'\\x01')    #for test usage\n            if uart.read()!=b'\\x01':error_occured()\n            else:\n                for byte in range(0,bytesperline):    \n                    uart.write(bytes([line_waiting_for_sending[byte][1]]))\n                #print(uart.read())    #for test usage\n                print('scale1 sent, waiting for acknowledgement')\n                #uart.write(b'\\x02')    #for test usage\n                if uart.read()!=b'\\x02':error_occured()\n                else:\n                    for byte in range(0,bytesperline):\n                        uart.write(bytes([line_waiting_for_sending[byte][2]]))\n                    #print(uart.read())    #for test usage\n                    print('scale2 sent, waiting for acknowledgement')\n                    #uart.write(b'\\x03')    #for test usage\n                    if uart.read()!=b'\\x03':error_occured()\n                    else:\n                        for byte in range(0,bytesperline):\n                            uart.write(bytes([line_waiting_for_sending[byte][3]]))\n                        #print(uart.read())    #for test usage\n                        print('scale3 sent, waiting for acknowledgement')\n                        #uart.write(b'\\x04')    #for test usage\n                        if uart.read()!=b'\\x04':error_occured()\n                        else:\n                            print('line',line,'printed.')\n\n    else : \n        error_occured()    \n    \n\n    print('done.')\n    uart.close()\n    print('Serial Port',port,\"is closed\")    \n    sys.exit(0)\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n" }, { "alpha_fraction": 0.481979101896286, "alphanum_fraction": 0.534229040145874, "avg_line_length": 21.39712905883789, "blob_id": "a64475dd9ec37b949e3caaa0a7e9a9f3c22ab354", "content_id": "279c2cf6653778e742d85bbba6779271a1ecceaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4707, "license_type": "no_license", "max_line_length": 105, "num_lines": 
209, "path": "/printer.ino", "repo_name": "gpthimble/Thremal-Printer", "src_encoding": "UTF-8", "text": "//buffer for one line(48 bytes , long 384 points)\nbyte data[48]={0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0,\n 0,0,0,0,0,0,0,0};\n//pin mapping \nint p_data_in = 2;\nint p_shift_clock=3;\nint p_motor_enable=11;//5\nint p_motor_dir_control=12;//6\nint p_motor_step=13;//7\nint p_latch=4;\nint p_stroke1=14;//可以合并,stroke1-2=11,stroke3-4=10,stroke5-6=9\nint p_stroke2=15;\nint p_stroke3=16;\nint p_stroke4=17;\nint p_stroke5=18;\nint p_stroke6=19;\n//default motor direction\nboolean motor_foward=true;\nfloat multi_heat=1;\nvoid setup() //初始化\n{\n // initialize digital pin LED_BUILTIN as an output.\n pinMode(p_data_in,OUTPUT);\n pinMode(p_shift_clock,OUTPUT);\n pinMode(p_latch,OUTPUT);\n data_out();\n pinMode(p_stroke1, OUTPUT);\n digitalWrite(p_stroke1,LOW);\n pinMode(p_stroke2, OUTPUT);\n digitalWrite(p_stroke2,LOW);\n pinMode(p_stroke3, OUTPUT);\n digitalWrite(p_stroke3,LOW);\n pinMode(p_stroke4, OUTPUT);\n digitalWrite(p_stroke4,LOW);\n pinMode(p_stroke5, OUTPUT);\n digitalWrite(p_stroke5,LOW);\n pinMode(p_stroke6, OUTPUT);\n digitalWrite(p_stroke6,LOW);\n pinMode(p_motor_enable,OUTPUT);\n digitalWrite(p_motor_enable,HIGH);\n pinMode(p_motor_dir_control,OUTPUT);\n digitalWrite(p_motor_dir_control,LOW);\n pinMode(p_motor_step,OUTPUT);\n digitalWrite(p_motor_step,LOW);\n\n digitalWrite(p_motor_enable,LOW); \n digitalWrite(p_motor_enable,HIGH); \n Serial.begin(115200);\n //Serial.print('1');\n //delay(10000);\n }\n\nvoid data_out() //shift out 48bytes long data\n{\n digitalWrite(p_latch,HIGH);\n for (int i = 0; i < 48; ++i)\n {\n shiftOut(p_data_in,p_shift_clock,MSBFIRST,data[i]);\n }\n digitalWrite(p_latch,LOW);}\n\nvoid motor_step (boolean motor_foward,int steps) //run motor \n{\n digitalWrite(p_motor_dir_control,motor_foward);\n for (int i = 0; i < steps; ++i)\n {\n delayMicroseconds(100);\n digitalWrite(p_motor_step,HIGH);\n delayMicroseconds(100);\n digitalWrite(p_motor_step,LOW);\n\n }}\n\nvoid test_data(int paten1,int paten2) //g\n{\n for (int i = 0; i < 48; ++i)\n {\n data[i]=paten1;\n ++i;\n data[i]=paten2;\n }}\n\nvoid print(int heat_time)\n{\n digitalWrite(p_stroke1,HIGH);\n digitalWrite(p_stroke2,HIGH);\n digitalWrite(p_stroke3,HIGH);\n digitalWrite(p_stroke4,HIGH);\n digitalWrite(p_stroke5,HIGH);\n digitalWrite(p_stroke6,HIGH);\n delay(heat_time*multi_heat);\n digitalWrite(p_stroke1,LOW);\n digitalWrite(p_stroke2,LOW);\n digitalWrite(p_stroke3,LOW);\n digitalWrite(p_stroke4,LOW);\n digitalWrite(p_stroke5,LOW);\n digitalWrite(p_stroke6,LOW);\n }\n// the loop function runs over and over again forever\nint incomingByte=0;\nint lines=0;\nint state =0;\nint i=0;\nvoid loop() {\n//data={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};\n if (state==0)\n { \n while(1)\n {\n digitalWrite(p_motor_enable,HIGH);\n //Serial.print('ready!');\n if (Serial.available() > 0) \n {\n incomingByte = Serial.read();\n\n if (int(incomingByte)!=0)\n {\n lines=incomingByte;\n Serial.write(incomingByte);\n state=1;\n digitalWrite(p_motor_enable,LOW);\n break;\n }\n }\n }\n\n }\n else if (state ==1)\n {\n\n i=0;\n while (i < 48)\n {\n if (Serial.available()>0)\n {\n data[i]=Serial.read();\n i++;\n }\n }\n data_out();\n print(16);\n //motor_step(false,1);\n Serial.write(state);\n state = 2;\n }\n else if (state==2) \n {\n i=0;\n while (i < 48)\n {\n if (Serial.available()>0)\n {\n data[i]=Serial.read();\n i++;\n 
}\n }\n data_out();\n print(8);\n //motor_step(false,1);\n Serial.write(state);\n state = 3;\n }\n else if (state==3)\n {\n \n i=0;\n while (i < 48)\n {\n if (Serial.available()>0)\n {\n data[i]=Serial.read();\n i++;\n }\n }\n data_out();\n print(4);\n //motor_step(false,1);\n Serial.write(state);\n state = 4;\n }\n\n else if (state ==4)\n {\n i=0;\n while (i < 48)\n {\n if (Serial.available()>0)\n {\n data[i]=Serial.read();\n i++;\n }\n }\n data_out();\n print(2);\n motor_step(false,4);\n Serial.write(state);\n lines=lines-1;\n if (lines==0)\n {\n state = 0;\n }\n else\n state = 1;\n }\n }\n\n \n \n\n" } ]
2
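The driver/firmware pair above approximates 4-bit grayscale by splitting each pixel into four bitplanes (slice_point in printer_driver.py) and strobing the thermal head with halved heat times per plane (print(16)/print(8)/print(4)/print(2) in printer.ino), so total burn time scales with the grey level. A small worked restatement; the helper below mirrors slice_point, and the pixel value 11 is just an example:

def slice_point(point):
    # plane order [bit3, bit2, bit1, bit0], as in printer_driver.py
    return [(point >> b) & 1 for b in (3, 2, 1, 0)]

heat_ms = [16, 8, 4, 2]              # per-plane strobe times from printer.ino
planes = slice_point(11)             # 0b1011 -> [1, 0, 1, 1]
total_heat = sum(h for h, p in zip(heat_ms, planes) if p)   # 16 + 4 + 2 = 22 ms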
haiyingman/flaskProject1
https://github.com/haiyingman/flaskProject1
241a76f2f8dfc045274f537f0c9cf46dcad748e1
f9ebe307e71b6dad67c9637551a17d5b2c144c54
14f6106ef06fb1f96279c832b8a8c2eece97121b
refs/heads/master
2020-06-23T00:06:07.060790
2019-07-24T14:04:09
2019-07-24T14:04:09
198,439,593
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7011494040489197, "alphanum_fraction": 0.7356321811676025, "avg_line_length": 43, "blob_id": "1789272af2499f982e9825820e7fff6f7855736b", "content_id": "4b19d45e426aa7b3ebc8fbe58609fc808c12dc76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 87, "license_type": "no_license", "max_line_length": 58, "num_lines": 2, "path": "/app/models/model.py", "repo_name": "haiyingman/flaskProject1", "src_encoding": "UTF-8", "text": "def tip(percent,billAmount):\n return float((float(percent)/100)) * float(billAmount)" }, { "alpha_fraction": 0.6619496941566467, "alphanum_fraction": 0.6619496941566467, "avg_line_length": 34.33333206176758, "blob_id": "5929142c490a148556ac73259b919339ca67c2aa", "content_id": "584918944c3e39aeb4106de1be222f610b6de0bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 109, "num_lines": 18, "path": "/app/routes.py", "repo_name": "haiyingman/flaskProject1", "src_encoding": "UTF-8", "text": "from app import app\nfrom flask import render_template, request\nfrom app.models import model, formopener\[email protected]('/')\[email protected]('/index')\ndef index():\n return render_template(\"index.html\")\n \[email protected]('/tipAmount', methods = [\"GET\",\"POST\"])\ndef tipAmount():\n if request.method == 'GET':\n return \"You did not fill out the form\"\n else:\n userData = dict(request.form)\n billAmount = userData['billAmount']\n percent = userData['percent']\n tipNum = model.tip(percent, billAmount)\n return render_template(\"tipAmount.html\", billAmount = billAmount, percent = percent, tipNum = tipNum)\n" } ]
2
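A quick check of the tip arithmetic in app/models/model.py above; the percent and bill values here are arbitrary examples:

def tip(percent, billAmount):
    return float(float(percent) / 100) * float(billAmount)

assert tip('15', '80') == 12.0   # a 15% tip on an 80.00 bill is 12.00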
dayouzi/cv_p1
https://github.com/dayouzi/cv_p1
87659c2b5585de08952a58bcf0975e0a4f896657
d727a2094e72affe34caf90d7cbdb190d5ad6a88
960ea7d4a60ba4c8f261d0552836443eb998a663
refs/heads/master
2021-01-19T15:01:41.255055
2015-03-16T19:52:12
2015-03-16T19:52:12
32,348,755
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.586776852607727, "alphanum_fraction": 0.6570248007774353, "avg_line_length": 59.25, "blob_id": "69d15c732aa65292681f6c7c5a5ac0b1e5eee0cd", "content_id": "6d02518aaab2231b0485b0e8e0edf141c7d08a3b", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 242, "license_type": "permissive", "max_line_length": 132, "num_lines": 4, "path": "/src/libsvm-3.20/svm_script_bin.sh", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "echo $1\necho $2\nsvm-train.exe ../../corpus/vis$1catfiles/train_$2_$3 ../../corpus/vis$1catfiles/$2_$3_model\nsvm-predict.exe ../../corpus/vis$1catfiles/test_$2_$3 ../../corpus/vis$1catfiles/$2_$3_model ../../corpus/vis$1catfiles/$2_$3_output\n\n" }, { "alpha_fraction": 0.4784262776374817, "alphanum_fraction": 0.5170270204544067, "avg_line_length": 93.49122619628906, "blob_id": "3c9161f59636393a4b7999a368c3c0a7d8b73c98", "content_id": "caeb860b904d896cd5a2d5507538a99556e36efc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 10777, "license_type": "permissive", "max_line_length": 206, "num_lines": 114, "path": "/src/libsvm-3.20/svm_unbalanced.sh", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#run propre.py to generate feature file first and specify sampling method\necho '============================================ BALANCE MUTIPLE ALL'\ncat ../../../test/$1/$1.train |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.train_svm\ncat ../../../test/$1/$1.test |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.test_svm\n./svm-train ../../../test/$1/$1.train_svm ../../../test/$1/$1.model\n./svm-predict ../../../test/$1/$1.test_svm ../../../test/$1/$1.model ../../../test/$1/$1.output\n\necho '=========================================== BALANCE MULTIPLE LOCAL'\n\ncat ../../../test/$1/$1.train |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"}' > ../../../test/$1/$1.train_svm_local\ncat ../../../test/$1/$1.train |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"}' > ../../../test/$1/$1.train_local\n\ncat ../../../test/$1/$1.test |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"}' > ../../../test/$1/$1.test_svm_local\ncat ../../../test/$1/$1.test |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf $i;print \"\"}' > ../../../test/$1/$1.test_local\n./svm-train ../../../test/$1/$1.train_svm_local ../../../test/$1/$1.model_local\n./svm-predict ../../../test/$1/$1.test_svm_local ../../../test/$1/$1.model_local ../../../test/$1/$1.output_local\n\necho '================================================ BALANCE MULTIPLE GLOBAL'\n\ncat ../../../test/$1/$1.train |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF}' > ../../../test/$1/$1.train_svm_global\ncat ../../../test/$1/$1.train |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF}' > ../../../test/$1/$1.train_global\ncat ../../../test/$1/$1.test |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF}' > ../../../test/$1/$1.test_svm_global\ncat ../../../test/$1/$1.test |awk 'BEGIN{FS=\" 
\"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF}' > ../../../test/$1/$1.test_global\n./svm-train ../../../test/$1/$1.train_svm_global ../../../test/$1/$1.model_global\n./svm-predict ../../../test/$1/$1.test_svm_global ../../../test/$1/$1.model_global ../../../test/$1/$1.output_global\n\n\n#binary\necho '====================================================== BALANCE BINARY ALL'\ncat ../../../test/$1/$1.train_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.train_svm_binary\ncat ../../../test/$1/$1.test_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.test_svm_binary\n./svm-train ../../../test/$1/$1.train_svm_binary ../../../test/$1/$1.model_binary\n./svm-predict ../../../test/$1/$1.test_svm_binary ../../../test/$1/$1.model_binary ../../../test/$1/$1.output_binary\n\necho '=================================================== BALANCE BINARY LOCAL'\ncat ../../../test/$1/$1.train_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"} ' > ../../../test/$1/$1.train_svm_binary_local\ncat ../../../test/$1/$1.train_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"} ' > ../../../test/$1/$1.train_binary_local\ncat ../../../test/$1/$1.test_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"}' > ../../../test/$1/$1.test_svm_binary_local\ncat ../../../test/$1/$1.test_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"}' > ../../../test/$1/$1.test_binary_local\n./svm-train ../../../test/$1/$1.train_svm_binary_local ../../../test/$1/$1.model_binary_local\n./svm-predict ../../../test/$1/$1.test_svm_binary_local ../../../test/$1/$1.model_binary_local ../../../test/$1/$1.output_binary_local\n\necho '==================================================== BALANCE BINARY GLOBAL'\ncat ../../../test/$1/$1.train_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF} ' > ../../../test/$1/$1.train_svm_binary_global\ncat ../../../test/$1/$1.train_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF} ' > ../../../test/$1/$1.train_binary_global\ncat ../../../test/$1/$1.test_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF}' > ../../../test/$1/$1.test_svm_binary_global\ncat ../../../test/$1/$1.test_binary |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF}' > ../../../test/$1/$1.test_binary_global\n\n./svm-train ../../../test/$1/$1.train_svm_binary_global ../../../test/$1/$1.model_binary_global\n./svm-predict ../../../test/$1/$1.test_svm_binary_global ../../../test/$1/$1.model_binary_global ../../../test/$1/$1.output_binary_global\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#unbalanced\necho '======================================================== UNBALANCE MUTIPLE ALL'\ncat ../../../test/$1/$1.train_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.train_svm_unbalanced\ncat ../../../test/$1/$1.test_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > 
../../../test/$1/$1.test_svm_unbalanced\n./svm-train ../../../test/$1/$1.train_svm_unbalanced ../../../test/$1/$1.model_unbalanced\n./svm-predict ../../../test/$1/$1.test_svm_unbalanced ../../../test/$1/$1.model_unbalanced ../../../test/$1/$1.output_unbalanced\n\necho '===================================================== UNBALANCE MULTIPLE LOCAL'\n\ncat ../../../test/$1/$1.train_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"}' > ../../../test/$1/$1.train_svm_local_unbalanced\ncat ../../../test/$1/$1.train_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"}' > ../../../test/$1/$1.train_local_unbalanced\n\ncat ../../../test/$1/$1.test_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"}' > ../../../test/$1/$1.test_svm_local_unbalanced\ncat ../../../test/$1/$1.test_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"}' > ../../../test/$1/$1.test_local_unbalanced\n\n./svm-train ../../../test/$1/$1.train_svm_local_unbalanced ../../../test/$1/$1.model_local_unbalanced\n./svm-predict ../../../test/$1/$1.test_svm_local_unbalanced ../../../test/$1/$1.model_local_unbalanced ../../../test/$1/$1.output_local_unbalanced\n\necho '======================================= UNBALANCE MULTIPLE GLOBAL'\n\ncat ../../../test/$1/$1.train_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF}' > ../../../test/$1/$1.train_svm_global_unbalanced\ncat ../../../test/$1/$1.train_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF}' > ../../../test/$1/$1.train_global_unbalanced\ncat ../../../test/$1/$1.test_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF}' > ../../../test/$1/$1.test_svm_global_unbalanced\ncat ../../../test/$1/$1.test_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF}' > ../../../test/$1/$1.test_global_unbalanced\n./svm-train ../../../test/$1/$1.train_svm_global_unbalanced ../../../test/$1/$1.model_global_unbalanced\n./svm-predict ../../../test/$1/$1.test_svm_global_unbalanced ../../../test/$1/$1.model_global_unbalanced ../../../test/$1/$1.output_global_unbalanced\n\n\n#binary\necho '===================================== UNBALANCE BINARY ALL'\ncat ../../../test/$1/$1.train_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.train_svm_binary_unbalanced\ncat ../../../test/$1/$1.test_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<NF;++i) printf \"%s\",\" \"i-1\":\"$i;print \" \"i-1\":\"$NF}' > ../../../test/$1/$1.test_svm_binary_unbalanced\n./svm-train ../../../test/$1/$1.train_svm_binary_unbalanced ../../../test/$1/$1.model_binary_unbalanced\n./svm-predict ../../../test/$1/$1.test_svm_binary_unbalanced ../../../test/$1/$1.model_binary_unbalanced ../../../test/$1/$1.output_binary_unbalanced\n\necho '====================================== UNBALANCE BINARY LOCAL'\ncat ../../../test/$1/$1.train_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"} ' > ../../../test/$1/$1.train_svm_binary_local_unbalanced\ncat ../../../test/$1/$1.train_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf 
\"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"} ' > ../../../test/$1/$1.train_binary_local_unbalanced\ncat ../../../test/$1/$1.test_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",\" \"i-1\":\"$i;print \"\"}' > ../../../test/$1/$1.test_svm_binary_local_unbalanced\ncat ../../../test/$1/$1.test_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=2;i<5;++i) printf \"%s\",$i;print \"\"}' > ../../../test/$1/$1.test_binary_local_unbalanced\n./svm-train ../../../test/$1/$1.train_svm_binary_local_unbalanced ../../../test/$1/$1.model_binary_local_unbalanced\n./svm-predict ../../../test/$1/$1.test_svm_binary_local_unbalanced ../../../test/$1/$1.model_binary_local_unbalanced ../../../test/$1/$1.output_binary_local_unbalanced\n\necho '======================================= UNBALANCE BINARY GLOBAL'\ncat ../../../test/$1/$1.train_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF} ' > ../../../test/$1/$1.train_svm_binary_global_unbalanced\ncat ../../../test/$1/$1.train_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF} ' > ../../../test/$1/$1.train_binary_global_unbalanced\ncat ../../../test/$1/$1.test_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",\" \"i-4\":\"$i;print \" \"i-4\":\"$NF}' > ../../../test/$1/$1.test_svm_binary_global_unbalanced\ncat ../../../test/$1/$1.test_binary_unbalanced |awk 'BEGIN{FS=\" \"}{printf \"%s\",$1; for (i=5;i<NF;++i) printf \"%s\",$i;print $NF}' > ../../../test/$1/$1.test_binary_global_unbalanced\n./svm-train ../../../test/$1/$1.train_svm_binary_global_unbalanced ../../../test/$1/$1.model_binary_global_unbalanced\n./svm-predict ../../../test/$1/$1.test_svm_binary_global_unbalanced ../../../test/$1/$1.model_binary_global_unbalanced ../../../test/$1/$1.output_binary_global_unbalanced\n\n\n\n\n\n" }, { "alpha_fraction": 0.5795297622680664, "alphanum_fraction": 0.6708160638809204, "avg_line_length": 50.57143020629883, "blob_id": "925adbb7c2eb0c8b2ca4911c54386d9221be4580", "content_id": "5bc3dbb6250a5252a8cae7f74a047cdd2f316872", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 723, "license_type": "permissive", "max_line_length": 129, "num_lines": 14, "path": "/src/libsvm-3.20/svm_script.sh", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "echo $1\necho $2\n#2 0.03125 47.5358\n#for cross_validation $2 = all_hog/sift\nsvm-scale.exe -l -1 -u 1 ../../corpus/vis$1catfiles/train_$2 > ../../corpus/vis$1catfiles/train_$2.scale\nsvm-scale.exe -l -1 -u 1 ../../corpus/vis$1catfiles/test_$2 > ../../corpus/vis$1catfiles/test_$2.scale\n\nsvm-train.exe -c 2 -g 0.03125 ../../corpus/vis$1catfiles/train_$2.scale ../../corpus/vis$1catfiles/$2_model\nsvm-predict.exe ../../corpus/vis$1catfiles/test_$2.scale ../../corpus/vis$1catfiles/$2_model ../../corpus/vis$1catfiles/$2_output\n\n\n#cross validation\n#svm-train.exe -c 2 -g 0.03125 -v 5 ../../corpus/vis$1catfiles/train_$2.scale\n#python gridregression.py v-log2c -5,5,1 -log2g -4,0,1 -v 5 ../../corpus/vis$1catfiles/train_$2.scale \n" }, { "alpha_fraction": 0.591176450252533, "alphanum_fraction": 0.596323549747467, "avg_line_length": 28.565217971801758, "blob_id": "1cda5fbce3c679d93f3be139e26e29e48f673ebf", "content_id": "dade94dca0053ec37878149312752954719625a8", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1360, "license_type": "no_license", "max_line_length": 65, "num_lines": 46, "path": "/src/eval", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom prepro import *\nimport collections, os, sys\nfrom util import getArgMap\nfrom collections import *\nfrom sklearn.metrics import confusion_matrix\n\nargMap = getArgMap(sys.argv[1:])\nsuffix = argMap.get('-s','')#suffix of training and testing files\nnumber = argMap.get('-n','')#number of cats\nbase_path = '../corpus/vis'+number+'catfiles/'\nfeat_suffix = argMap.get('-f','')\n\ndef getConfMatrix():\n y_true, y_pre = [],[]\n r = open(base_path+'test_'+suffix+'_'+feat_suffix)\n for line in r:\n \ty_true.append(int(line.split()[0]))\n r = open(base_path+suffix+'_'+feat_suffix+'_output')\n for line in r:\n \ty_pre.append(int(line))\n cm = confusion_matrix(y_true, y_pre)\n print cm\n \ndef genLabels():\n catID = 0\n w = open(base_path+'labels.dat','w')\n cats = [];label = defaultdict(int)\n for cat in os.listdir(base_path):\n \tif os.path .isfile(base_path+cat):\n \t continue\n \tw.write(str(cat)+'\\n')\n \t\n \tif str(cat) not in cats:\n \t cats.append(str(cat))\n \tfor img in os.listdir(base_path+cat):\n \t imID = img.split('.')[0]\n \t imFmt = img.split('.')[1]\n \t if not imID.isdigit():\n \t \tcontinue\n \t label[int(imID)] = int(catID)\n \t w.write(str(catID)+' '+imID+'\\n')\n \tcatID = catID + 1\n return label\n" }, { "alpha_fraction": 0.6156509518623352, "alphanum_fraction": 0.6405817270278931, "avg_line_length": 32.604652404785156, "blob_id": "52609bc669e1827fb1e5ddfa4c1490f7ee5f2e16", "content_id": "41a80469345156f4f30a8017d81b3f40ebe1a30f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1444, "license_type": "no_license", "max_line_length": 86, "num_lines": 43, "path": "/src/sift.py", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport cv2,sys,os\nimport numpy as np\nfrom util import getArgMap\nfrom collections import *\nimport sklearn\nfrom sklearn.cluster import KMeans\nargMap = getArgMap(sys.argv[1:])\nsuffix = argMap.get('-s','')\nbase_path = '../corpus/vis'+suffix+'catfiles/'\ndef reformat():\n cats = [];label = defaultdict(int)\n for cat in os.listdir(base_path):\n \tif os.path .isfile(base_path+cat):\n \t continue\n \tfor img in os.listdir(base_path+cat):\n \t imID = img.split('.')[0]\n \t if img.split('.')[1] == 'gif' or img.split('.')[1] == 'x-ms-bmp' :\n \t \tcontinue\n \t if not imID.isdigit():\n \t \tcontinue\n \t print base_path+cat+'/'+img\n \t img1 = cv2.resize(cv2.imread(base_path+cat+'/'+img),(128,128))\n \t img2= cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n \t cv2.imwrite(base_path+cat+'/'+img,img2)\n#http://nbviewer.ipython.org/gist/kislayabhi/abb68be1b0be7148e7b7\ndef sift():\n descriptor_mat=[]\n k=200\n sift = cv2.SIFT()\n kp,des = sift.detectAndCompute(gray,None)\n descriptor_mat.append(des)\n descriptor_mat=np.double(np.vstack(descriptor_mat))\n descriptor_mat=descriptor_mat.T\n distance=EuclideanDistance(sg_descriptor_mat_features, sg_descriptor_mat_features)\n kmeans=KMeans(k, distance)\n kmeans.train()\n cluster_centers=(kmeans.get_cluster_centers())\n \nif __name__ == \"__main__\":\n reformat()" }, { "alpha_fraction": 0.5625, "alphanum_fraction": 0.5696022510528564, "avg_line_length": 25.11111068725586, "blob_id": 
"011dfd2a02be49a0a424fa66191658c603a7f00f", "content_id": "5f8efdc23d7347c1b794a77781592b9bde1c3004", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 46, "num_lines": 27, "path": "/src/prepo.py~", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport collections, os, sys\nfrom util import getArgMap\n\nargMap = getArgMap(sys.argv[1:])\n#catname = argMap.get('-b','')\nsuffix = argMap.get('-s','')\nbase_path = '../corpus/vis'+suffix+'catfiles/'\ndef genLabels():\n catID = 0\n cats = []\n for cat in os.listdir(base_path):\n \tif os.path .isfile(base_path+cat):\n \t continue\n \tprint str(cat)\n \tcatID = catID + 1\n \tif str(cat) not in cats:\n \t cats.append(str(cat))\n \tfor img in os.listdir(base_path+cat):\n \t #print str(img)\n \t imID = img.split('.')[0]\n \t print str(catID)+' '+imID\n \t \nif __name__ == \"__main__\":\n genLabels()" }, { "alpha_fraction": 0.6014407277107239, "alphanum_fraction": 0.6094709634780884, "avg_line_length": 35.821739196777344, "blob_id": "2a7134ad24af68fef0e333721856b42be4991db7", "content_id": "fb8b188b4bce7cf95a689cad228626fabfafd87b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8468, "license_type": "no_license", "max_line_length": 134, "num_lines": 230, "path": "/src/prepro.py~", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.cross_validation import KFold\nfrom sklearn.cross_validation import LeavePLabelOut\nimport numpy as np\nimport random,sys\nfrom collections import *\nfrom util import getArgMap\nimport random \n#example: http://sebastianraschka.com/Articles/2014_scikit_dataprocessing.html\n#f_set: feature number\nargMap = getArgMap(sys.argv[1:])\nbookname = argMap.get('-b','')\n\ndef preprocess(data_file,train_file,test_file,byID=1,f_set=[],sampleMethod = 'oversampling', norm=1):\n x,y,labels = readFile(data_file)\n train_index,test_index,labels_train,labels_test = splitFileByID(labels)\n #print labels_train\n #print labels_test\n x_train,y_train,x_test,y_test=[],[],[],[]\n \n lnew_train = sampling(y[train_index],'None')\n #lnew_test = sampling(y[test_index], 'oversampling')\n y_test=y[test_index]\n x_test=x[test_index]\n labels_train_sample=[]\n for l in lnew_train:\n for i in lnew_train[l]:\n x_train.append(x[train_index][i])\n y_train.append(y[train_index][i])\n labels_train_sample.append(labels_train[i])\n \n x_train=np.array(x_train)\n #y_train=np.array(y_train)\n x_test=np.array(x_test)\n #y_test=np.array(y_test)\n if norm == 1:\n x_train,x_test = normalize(x_train,x_test)\n #print x_train\n #print x_test\n data_samples_train=defaultdict(list)\n data_samples_test=defaultdict(list)\n \n for i,l in enumerate(x_train):\n \ttemp=[]\n \ttemp.append(y_train[i])\n \ttemp.append(int(labels_train_sample[i]))\n \tfor ll in l:\n \t temp.append(ll)\n \tdata_samples_train[labels_train_sample[i]].append(temp)\n #print data_samples_train\n for i,l in enumerate(x_test):\n \ttemp=[]\n \ttemp.append(y_test[i])\n \ttemp.append(int(labels_test[i]))\n \tfor ll in l:\n \t temp.append(ll)\n 
\tdata_samples_test[labels_test[i]].append(temp)\n #print data_samples_train\n x_train,y_train,x_test,y_test=[],[],[],[]\n for x in sorted(data_samples_train.keys()):\n \tfor sample in data_samples_train[x]:\n \t y_train.append(sample[0])\n \t x_train.append(sample[1:])\n \n for x in sorted(data_samples_test.keys()):\n \tfor sample in data_samples_test[x]:\n \t y_test.append(sample[0])\n \t x_test.append(sample[1:])\n saveFile2(train_file,test_file,x_train,x_test,y_train,y_test)\n return train_index,test_index\n#byID: data is continous, e.g., queries from same chapter are put together\n# if byID == 1: 0:label ID: the ID that shuffle and split is based on\ndef readFile(data_file,deli='\\t',byID=1,f_set=[],libsvm=-1):\n all_data = np.loadtxt(open(data_file,\"r\"),\n delimiter=deli,\n dtype=np.float64\n )\n labels=[]\n y = np.array(all_data[:,0])\n if f_set==[]:\n if byID == 1:\n #involve chapter id as a prior\n x = np.array(all_data[:,2:])\n labels = np.array(all_data[:,1])\n else:\n x = np.array(all_data[:,1:])\n else:\n for f in f_set:\n x = np.array(all_data[:,f_set])\n return x,y,labels\ndef multi2bin(target,y):\n print target\n y_new=[]\n for i in xrange(len(y)):\n \tif int(y[i]) == int(target):\n \t y_new.append(1)\n \telse:\n \t y_new.append(0)\n return np.array(y_new)\ndef splitFile(y,test_size=0.20,random_state=0): \n #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size, random_state)\n sss = StratifiedShuffleSplit(y, 3, 0.2,0.8)\n train_index,test_index = [],[]\n for idx1,idx2 in sss:\n train_index = idx1\n test_index = idx2\n #pl = int(len(set(y))*test_size)\n #print pl\n #sss = LeavePLabelOut(y,pl)\n return train_index,test_index\n#use data file with section number\ndef splitFileByID(labels,test_size=0.2):\n pl = int(len(set(labels))*test_size)\n rs = random.sample(set(labels),pl)\n #print rs\n train_index,test_index,ids_train, ids_test = [],[],[],[]\n for i,label in enumerate(labels):\n if label in rs:\n test_index.append(i)\n ids_test.append(label)\n else:\n train_index.append(i) \n ids_train.append(label)\n return train_index,test_index,ids_train,ids_test\n#num: target number \ndef sampleHelper(num,c_num):\n if num == len(c_num):\n \treturn c_num\n ret = []\n if num > len(c_num):\n \tfor i in xrange(min(int(num/len(c_num)),10000)):\n \t for j in c_num:\n \t \tret.append(j)\n ks = random.sample(xrange(len(c_num)-1),num%len(c_num))\n for k in ks:\n ret.append(c_num[k])\n if num < len(c_num):\n \tks = random.sample(xrange(len(c_num)-1),num)\n \tfor k in ks:\n \t ret.append(c_num[k])\n return ret\ndef sampling(y,sampleMethod = 'oversampling'):\n class_num = defaultdict(int)\n lorg = defaultdict(list)\n lnew = defaultdict(list)\n \n for i,l in enumerate(y):\n class_num[l] = class_num[l]+1\n lorg[l].append(i)\n if sampleMethod == 'undersampling':\n min_num = min([class_num[x] for x in class_num])\n for l in class_num.keys():\n \tret = sampleHelper(min_num,lorg[l])\n for r in ret:\n lnew[l].append(r)\n \n elif sampleMethod == 'oversampling':\n max_num = max([class_num[x] for x in class_num])\n for l in class_num:\n ret = sampleHelper(max_num,lorg[l])\n for r in ret:\n \tlnew[l].append(r)\n elif sampleMethod == 'middlesampling':\n \tnum = sorted(class_num.items(), key=lambda x: x[1] )[int(len(class_num)/2)][1]\n \tfor l in class_num:\n \t ret = sampleHelper(num,lorg[l])\n \t for r in ret:\n \t \tlnew[l].append(r)\n else:\n return lorg\n return lnew\n \ndef normalize(x_train,x_test):\n minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit(x_train)\n 
x_train = minmax_scale.transform(x_train)\n x_test = minmax_scale.transform(x_test)\n return x_train,x_test\n\ndef saveFile(train_result,test_result,x_train,x_test,y_train,y_test): \n training_data = np.hstack((y_train.reshape(y_train.shape[0], 1), x_train))\n test_data = np.hstack((y_test.reshape(y_test.shape[0], 1), x_test))\n np.savetxt(train_result, training_data, delimiter=' ')\n np.savetxt(test_result, test_data, delimiter=' ')\ndef saveFile2(train_result,test_result,x_train,x_test,y_train,y_test,svm_light = -1):\n w_train = open(train_result,'w')\n #w_train_binary = open(train_result+'_binary','w')\n for i,x in enumerate(x_train):\n \tif svm_light == 1:\n \t w_train.write(str(int(y_train[i]))+' '+' '.join(str(i+1)+':'+str(xx) for i,xx in enumerate(x))+'\\n')\n \telse:\n \t w_train.write(str(int(y_train[i]))+' '+' '.join(str(xx) for xx in x)+'\\n')\n w_test = open(test_result,'w')\n #w_test_binary = open(test_result+'_binary','w')\n for i,x in enumerate(x_test):\n \tif svm_light == 1:\n \t w_test.write(str(int(y_test[i]))+' '+' '.join(str(i+1)+':'+str(xx) for i,xx in enumerate(x))+'\\n')\n \telse:\n \t w_test.write(str(int(y_test[i]))+' '+' '.join(str(xx) for xx in x)+'\\n')\ndef writeFile(data_file,suffix,train_index,test_index):\n i=0\n #print train_index\n #print test_index\n w_train=open(data_file+suffix+'_train','w')\n w_test=open(data_file+suffix+'_test','w')\n with open(data_file+'.features_org','r') as reader:\n for line in reader:\n if i in train_index:\n w_train.write(line)\n else:\n w_test.write(line)\n i=i+1\n'''\ndef main():\n #preprocess(data_file,result_file)\n fn = '../../test/'+bookname+'/'+bookname\n train_index,test_index=preprocess(fn+'.features_id',fn+'.train',fn+'.test')\n writeFile(fn,'.org',train_index,test_index)\n train_index,test_index = preprocess(fn+'.features_id',fn+'.train_unbalanced',fn+'.test_unbalanced',[],'None')\n writeFile(fn,'.unbalanced_org',train_index,test_index)\n train_index,test_index = preprocess(fn+'.features_binary_id',fn+'.train_binary',fn+'.test_binary',[],'oversampling')\n writeFile(fn,'.binary_org',train_index,test_index)\n train_index,test_index = preprocess(fn+'.features_binary_id',fn+'.train_binary_unbalanced',fn+'.test_binary_unbalanced',[],'None')\n writeFile(fn,'binary_unbalanced_org',train_index,test_index)\nmain()\n'''" }, { "alpha_fraction": 0.585533857345581, "alphanum_fraction": 0.5966322422027588, "avg_line_length": 34.236488342285156, "blob_id": "5378f33fa4a05d3b0cb92eb85abac3d8ba1ea5fa", "content_id": "c12864d5a706911b5a44a1fb075bb19f9421edf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5226, "license_type": "no_license", "max_line_length": 109, "num_lines": 148, "path": "/src/prepro.py", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.cross_validation import StratifiedShuffleSplit\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.cross_validation import KFold\nfrom sklearn.cross_validation import LeavePLabelOut\nimport numpy as np\nimport random,sys\nfrom collections import *\nfrom util import getArgMap\nimport random \n#example: http://sebastianraschka.com/Articles/2014_scikit_dataprocessing.html\n#f_set: feature number\nargMap = getArgMap(sys.argv[1:])\nbookname = argMap.get('-b','')\n\n\n#byID: data is continous, e.g., queries from same chapter are put together\n# if byID == 
1: 0:label ID: the ID that shuffle and split is based on\ndef readFile(data_file,deli='\\t',byID=1,f_set=[],libsvm=-1):\n    all_data = np.loadtxt(open(data_file,\"r\"),\n                           delimiter=deli,\n                           dtype=np.float64\n                          )\n    labels=[]\n    y = np.array(all_data[:,0])\n    if f_set==[]:\n        if byID == 1:\n            #involve chapter id as a prior\n            x = np.array(all_data[:,2:])\n            labels = np.array(all_data[:,1])\n        else:\n            x = np.array(all_data[:,1:])\n    else:\n        x = np.array(all_data[:,f_set])\n    return x,y,labels\n    \ndef multi2bin(target,y):\n    print target\n    y_new=[]\n    for i in xrange(len(y)):\n    \tif int(y[i]) == int(target):\n    \t    y_new.append(1)\n    \telse:\n    \t    y_new.append(0)\n    return np.array(y_new)\n    \ndef splitFile(y,test_size=0.20,random_state=0): \n    #x_train, x_test, y_train, y_test = train_test_split(x, y, test_size, random_state)\n    sss = StratifiedShuffleSplit(y, 3, 0.2,0.8)\n    train_index,test_index = [],[]\n    for idx1,idx2 in sss:\n        train_index = idx1\n        test_index = idx2\n    return train_index,test_index\n    \n#use data file with section number\ndef splitFileByID(labels,test_size=0.2):\n    pl = int(len(set(labels))*test_size)\n    rs = random.sample(set(labels),pl)\n    #print rs\n    train_index,test_index,ids_train, ids_test = [],[],[],[]\n    for i,label in enumerate(labels):\n        if label in rs:\n            test_index.append(i)\n            ids_test.append(label)\n        else:\n            train_index.append(i)    \n            ids_train.append(label)\n    return train_index,test_index,ids_train,ids_test\n#num: target number \ndef sampleHelper(num,c_num):\n    if num == len(c_num):\n    \treturn c_num\n    ret = []\n    if num > len(c_num):\n    \tfor i in xrange(min(int(num/len(c_num)),10000)):\n    \t    for j in c_num:\n    \t    \tret.append(j)\n    \t# draw the remainder uniformly from all indices of c_num\n    \tks = random.sample(xrange(len(c_num)),num%len(c_num))\n    \tfor k in ks:\n    \t    ret.append(c_num[k])\n    if num < len(c_num):\n    \tks = random.sample(xrange(len(c_num)),num)\n    \tfor k in ks:\n    \t    ret.append(c_num[k])\n    return ret\ndef sampling(y,sampleMethod = 'oversampling'):\n    class_num = defaultdict(int)\n    lorg = defaultdict(list)\n    lnew = defaultdict(list)\n    \n    for i,l in enumerate(y):\n        class_num[l] = class_num[l]+1\n        lorg[l].append(i)\n    if sampleMethod == 'undersampling':\n        min_num = min([class_num[x] for x in class_num])\n        for l in class_num.keys():\n            ret = sampleHelper(min_num,lorg[l])\n            for r in ret:\n                lnew[l].append(r)\n    \n    elif sampleMethod == 'oversampling':\n        max_num = max([class_num[x] for x in class_num])\n        for l in class_num:\n            ret = sampleHelper(max_num,lorg[l])\n            for r in ret:\n            \tlnew[l].append(r)\n    elif sampleMethod == 'middlesampling':\n    \tnum = sorted(class_num.items(), key=lambda x: x[1] )[int(len(class_num)/2)][1]\n    \tfor l in class_num:\n    \t    ret = sampleHelper(num,lorg[l])\n    \t    for r in ret:\n    \t    \tlnew[l].append(r)\n    else:\n        return lorg\n    return lnew\n    \ndef normalize(x_train,x_test):\n    minmax_scale = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit(x_train)\n    x_train = minmax_scale.transform(x_train)\n    x_test = minmax_scale.transform(x_test)\n    return x_train,x_test\n\ndef saveFile(train_result,test_result,x_train,x_test,y_train,y_test): \n    training_data = np.hstack((y_train.reshape(y_train.shape[0], 1), x_train))\n    test_data = np.hstack((y_test.reshape(y_test.shape[0], 1), x_test))\n    np.savetxt(train_result, training_data, delimiter=' ')\n    np.savetxt(test_result, test_data, delimiter=' ')\n    \ndef saveFile2(train_result,test_result,x_train,x_test,y_train,y_test,svm_light = -1):\n    w_train = open(train_result,'w')\n    #w_train_binary = open(train_result+'_binary','w')\n    for i,x in enumerate(x_train):\n    \tif svm_light == 1:\n    \t    
w_train.write(str(int(y_train[i]))+' '+' '.join(str(i+1)+':'+str(xx) for i,xx in enumerate(x))+'\\n')\n \telse:\n \t w_train.write(str(int(y_train[i]))+' '+' '.join(str(xx) for xx in x)+'\\n')\n w_test = open(test_result,'w')\n #w_test_binary = open(test_result+'_binary','w')\n for i,x in enumerate(x_test):\n \tif svm_light == 1:\n \t w_test.write(str(int(y_test[i]))+' '+' '.join(str(i+1)+':'+str(xx) for i,xx in enumerate(x))+'\\n')\n \telse:\n \t w_test.write(str(int(y_test[i]))+' '+' '.join(str(xx) for xx in x)+'\\n')\n \t \n\n" }, { "alpha_fraction": 0.6005675792694092, "alphanum_fraction": 0.611919105052948, "avg_line_length": 33.71604919433594, "blob_id": "172312c75efa8cb8ebabd7779eb03e533d82b3be", "content_id": "d7ca0ee0132fb240849788682224ee0fbcd36413", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2819, "license_type": "no_license", "max_line_length": 88, "num_lines": 81, "path": "/src/fileGen.py", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom prepro import *\nimport collections, os, sys\nfrom util import getArgMap\nfrom collections import *\nfrom sklearn.metrics import confusion_matrix\n\nargMap = getArgMap(sys.argv[1:])\nsuffix = argMap.get('-s','')#suffix of training and testing files\nnumber = argMap.get('-n','')#number of cats\nbase_path = '../corpus/vis'+number+'catfiles/'\nfeat_suffix = argMap.get('-f','')\n\n\ndef genFeatFile(numCenter):\n label = genLabels()\n r = open(base_path+'assg_'+feat_suffix+'.dat','r')\n w = open(base_path+'features_'+feat_suffix,'w')\n feat = defaultdict(lambda: defaultdict(int))\n for line in r:\n \tif len(line.split('\\t')) < 2:\n \t continue\n \timgID = int(line.split('\\t')[0]);centerID = int(line.split('\\t')[1].strip())\n \tfeat[imgID][centerID-1] = feat[imgID][centerID-1]+1\n \n for i in sorted(feat.keys()):\n \tif i not in label.keys():\n \t continue\n \tw.write( str(label[i])+'\\t')\n \tw.write('\\t'.join(str(feat[i][c]) for c in xrange(numCenter)))\n \tw.write('\\n')\n \t\n#target is specified as number of images classes in binary classification\n#default '' in multi-classification\ndef genTrainTestFiles(x,y,target='',samplingMethod = 'oversampling',folds=3):\n train_file = base_path + 'train_'+'all_'+feat_suffix\n test_file = base_path + 'test_'+'all_'+feat_suffix\n lnew = sampling(y,samplingMethod)\n X_new,y_new = [],[]\n for l in lnew:\n \tfor i in lnew[l]:\n \t X_new.append(x[i])\n \t y_new.append(y[i])\n saveFile2(train_file,test_file,X_new,[],y_new,[],1) #write file in svm-light format\n skf = StratifiedKFold(y, n_folds=folds, shuffle = True)\n k=0\n for train_index, test_index in skf:\n \ttrain_file = base_path + 'train_'+str(k)+target+'_'+feat_suffix\n \ttest_file = base_path + 'test_'+ str(k)+target+'_'+feat_suffix\n \tk=k+1\n \tX_test = x[test_index]\n \ty_train, y_test = y[train_index], y[test_index]\n \tlnew_train = sampling(y_train,samplingMethod)\n \tX_train, y_train = [],[]\n \tfor l in lnew_train:\n \t for i in lnew_train[l]:\n \t \tX_train.append(x[train_index][i])\n \t \ty_train.append(y[train_index][i])\n \t\n \tsaveFile2(train_file,test_file,X_train,X_test,y_train,y_test,1) \n#generate binary classification training/testing files\ndef genBinTrainTestFiles(x,y):\n for yy in set(y):\n \tc1,c2=0,0\n \ty_new = multi2bin(yy,y)\n \tfor k in y_new:\n \t if k==1:\n \t \tc1=c1+1\n \t elif k==0:\n \t \tc2=c2+1\n \tgenTrainTestFiles(x,y_new,'_'+str(int(yy)))\n \t\nif 
__name__ == \"__main__\":\n \n genFeatFile(200) #numbers of centers\n data_file = base_path+'features_'+feat_suffix\n x,y,labels = readFile(data_file,'\\t',0)\n genTrainTestFiles(x,y,'','None')\n #genBinTrainTestFiles(x,y)\n \n " }, { "alpha_fraction": 0.5582412481307983, "alphanum_fraction": 0.5688305497169495, "avg_line_length": 30.48550796508789, "blob_id": "9d711f62bd7a2a78d232adf59a402ff1c1a189c4", "content_id": "de7a9721d55b35a1a8aadcb1df471f11b971b5f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4344, "license_type": "no_license", "max_line_length": 81, "num_lines": 138, "path": "/src/fileGen.py~", "repo_name": "dayouzi/cv_p1", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nfrom prepro import *\nimport collections, os, sys\nfrom util import getArgMap\nfrom collections import *\nfrom sklearn.metrics import confusion_matrix\n\nargMap = getArgMap(sys.argv[1:])\n#catname = argMap.get('-b','')\nsuffix = argMap.get('-s','') \nnumber = argMap.get('-n','')\nbase_path = '../corpus/vis'+number+'catfiles/'\nfeat_suffix = argMap.get('-f','0')\ndef getConfMatrix():\n y_true, y_pre = [],[]\n r = open(base_path+'test_'+feat_suffix+suffix)\n for line in r:\n \ty_true.append(int(line.split()[0]))\n r = open(base_path+feat_suffix+'_output'+suffix)\n for line in r:\n \ty_pre.append(int(line))\n cm = confusion_matrix(y_true, y_pre)\n print cm\ndef genLabels():\n catID = 0\n w = open(base_path+'labels.dat','w')\n cats = [];label = defaultdict(int)\n for cat in os.listdir(base_path):\n \tif os.path .isfile(base_path+cat):\n \t continue\n \tw.write(str(cat)+'\\n')\n \t\n \tif str(cat) not in cats:\n \t cats.append(str(cat))\n \tfor img in os.listdir(base_path+cat):\n \t #print str(img)\n \t imID = img.split('.')[0]\n \t imFmt = img.split('.')[1]\n \t if not imID.isdigit() or imFmt == 'gif':\n \t \tcontinue\n \t label[int(imID)] = int(catID)\n \t w.write(str(catID)+' '+imID+'\\n')\n \tcatID = catID + 1\n return label\ndef genFeat(numCenter):\n \n r = open(base_path+'assg.dat','r')\n temp = [0 for i in xrange(numCenter)]\n feat = defaultdict(lambda: defaultdict(int))\n for line in r:\n \timgID = int(line.split('\\t')[0]);centerID = int(line.split('\\t')[1].strip())\n \tfeat[imgID][centerID] = feat[imgID][centerID]+1\n for i in sorted(feat.keys()):\n \tprint str(i)+'\\t',\n \tfor c in xrange(numCenter):\n \t print str(feat[i][c])+'\\t',\n \tprint '\\n'\n \t\ndef genFeatFile(numCenter):\n label = genLabels()\n r = open(base_path+'assg_'+suffix+'.dat','r')\n w = open(base_path+'features_'+suffix,'w')\n #temp = [0 for i in xrange(numCenter)]\n feat = defaultdict(lambda: defaultdict(int))\n for line in r:\n \tif len(line.split('\\t')) < 2:\n \t continue\n \timgID = int(line.split('\\t')[0]);centerID = int(line.split('\\t')[1].strip())\n \tfeat[imgID][centerID-1] = feat[imgID][centerID-1]+1\n \n for i in sorted(feat.keys()):\n \tif i not in label.keys():\n \t #print i\n \t continue\n \tw.write( str(label[i])+'\\t')\n \tw.write('\\t'.join(str(feat[i][c]) for c in xrange(numCenter)))\n \tw.write('\\n')\n \t\ndef genTrainTestFiles(x,y,target='',samplingMethod = 'oversampling',folds=5):\n train_file = base_path + 'train_'+target+'all_'+suffix\n test_file = base_path + 'test_'+ target+'all_'+suffix\n lnew = sampling(y,'undersampling')\n X_new,y_new = [],[]\n for l in lnew:\n \tfor i in lnew[l]:\n \t X_new.append(x[i])\n \t y_new.append(y[i])\n 
saveFile2(train_file,test_file,X_new,[],y_new,[],1) \n skf = StratifiedKFold(y, n_folds=5, shuffle = True)\n k=0\n for train_index, test_index in skf:\n \ttrain_file = base_path + 'train_'+str(k)+target+'_'+suffix\n \ttest_file = base_path + 'test_'+ str(k)+target+'_'+suffix\n \tk=k+1\n \tprint k\n \tX_test = x[test_index]\n \ty_train, y_test = y[train_index], y[test_index]\n \tlnew_train = sampling(y_train,samplingMethod)\n \t#lnew_test = sampling(y_test,samplingMethod)\n \tX_train, y_train = [],[]\n \t#X_test, y_test = [],[]\n \tfor l in lnew_train:\n \t for i in lnew_train[l]:\n \t \tX_train.append(x[train_index][i])\n \t \ty_train.append(y[train_index][i])\n \t'''\n \tfor l in lnew_test:\n \t for i in lnew_test[l]:\n \t \tX_test.append(x[test_index][i])\n \t \ty_test.append(y[test_index][i])\n \t'''\n \tsaveFile2(train_file,test_file,X_train,X_test,y_train,y_test,1) \ndef genBinTrainTestFiles(x,y):\n for yy in set(y):\n \t#print yy\n \tc1,c2=0,0\n \t\n \ty_new = multi2bin(yy,y)\n \t\n \tfor k in y_new:\n \t if k==1:\n \t \tc1=c1+1\n \t elif k==0:\n \t \tc2=c2+1\n \tprint c1,c2\n \t#print y\n \tgenTrainTestFiles(x,y_new,'_'+str(int(yy)))\nif __name__ == \"__main__\":\n \n genFeatFile(100)\n data_file = base_path+'features_'+suffix\n x,y,labels = readFile(data_file,'\\t',0)\n genTrainTestFiles(x,y,'','None')\n genBinTrainTestFiles(x,y)\n \n #getConfMatrix()" } ]
10
kchaitanya863/remoteAdmin
https://github.com/kchaitanya863/remoteAdmin
1033200f1976becb40aef583358d8edc9c72b699
16192cc39628cbc114609ee6e9f0e0765736cc6a
bc0bcaec02085a694bc2a9a8813d2cafc1ca3671
refs/heads/master
2020-12-02T08:42:02.723989
2020-02-03T07:33:14
2020-02-03T07:33:14
230,946,546
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6217391490936279, "alphanum_fraction": 0.6217391490936279, "avg_line_length": 17.440000534057617, "blob_id": "7e377bd82629b0556d08d1dbc228a211a903b21b", "content_id": "b01e701617131c9e8aeea97eb482ea52f7609fb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 43, "num_lines": 25, "path": "/server.py", "repo_name": "kchaitanya863/remoteAdmin", "src_encoding": "UTF-8", "text": "from flask import Flask\nimport subprocess\n\nfrom flask import request\napp = Flask(__name__)\n\n\n\[email protected]('/')\ndef hello_world():\n return 'remoteAdmin is up and running!'\n\n#run as /run?cmd=curl -s ifconfig.co\[email protected]('/run')\ndef run():\n cmd = request.args.get('cmd')\n try:\n output = subprocess.getoutput(cmd)\n except Exception as e:\n output = str(e)\n print(output)\n return str(output)\n\nif __name__ == '__main__':\n app.run()" }, { "alpha_fraction": 0.7575405836105347, "alphanum_fraction": 0.7575405836105347, "avg_line_length": 29.821428298950195, "blob_id": "0345f797329be81a1303b74fd8c1e0b9034a7212", "content_id": "943d510625e24c7d269768896e4e9af211c2582b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 862, "license_type": "no_license", "max_line_length": 88, "num_lines": 28, "path": "/README.md", "repo_name": "kchaitanya863/remoteAdmin", "src_encoding": "UTF-8", "text": "# remoteAdmin\nAn all powerful tool for remote administration, in making!\nPrimary focus will be on getting Linux systems to have client and a Linux/docker server.\nTool shall be able to monitor/command \"clients\".\n\nServer:\n\n - Store client metrics (Disk space, RAM, CPU,..) probably in Elasticsearch.\n - Chart client metrics\n - Have alerts on metrics\n - Remote Shell to client\n - Execute remote commands on client(s)\n - Manage processes on client OS.\n - Upload & Download files from clients\n - remote execute a shell script to client\n - expose client ports to internet using ngrok (not important)\n - Nice to have:\n\t - Role based authentications\n\t - Teams\n\t - Timed team/Individual ssh time\n\t - Alert Teams/Individuals on client not active\n\nClient:\n\n - Send system metrics\n - Send additional metrics about custom services\n - Run server commands\n - Provide remote ssh" } ]
2
melizalab/gammatone
https://github.com/melizalab/gammatone
dcfab998e24ddfdf3d8d6c4be829c54a4756fc9e
087b2e8bb0024b0535ad31451075805aa68a5eca
c69c933c8a6bd0eb2f6da4717fb5965cb2185670
refs/heads/master
2022-11-30T02:41:24.411761
2022-11-22T18:57:37
2022-11-22T18:57:37
209,094,417
0
0
BSD-3-Clause
2019-09-17T15:45:28
2017-04-13T21:19:38
2017-04-13T21:24:04
null
[ { "alpha_fraction": 0.6347366571426392, "alphanum_fraction": 0.6422615647315979, "avg_line_length": 26.779661178588867, "blob_id": "f2b46b51d1310dee90c53a6536e6096403a95966", "content_id": "e1c63b353c251807b8cb27f3445a2ef02d005f76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4917, "license_type": "permissive", "max_line_length": 81, "num_lines": 177, "path": "/gammatone/plot.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"\nPlotting utilities related to gammatone analysis, primarily for use with\n``matplotlib``.\n\nSee COPYING for copyright and licensing information.\n\n\"\"\"\nfrom __future__ import division\nimport argparse\nimport os.path\n\nimport matplotlib.pyplot\nimport matplotlib.ticker\nimport numpy as np\nimport scipy.constants\nimport scipy.io.wavfile\n\nfrom .filters import erb_point\nimport gammatone.gtgram\nimport gammatone.fftweight\n\n\nclass ERBFormatter(matplotlib.ticker.EngFormatter):\n \"\"\"\n Axis formatter for gammatone filterbank analysis. This formatter calculates\n the ERB spaced frequencies used for analysis, and renders them similarly to\n the engineering axis formatter.\n\n The scale is changed so that `[0, 1]` corresponds to ERB spaced frequencies\n from ``high_freq`` to ``low_freq`` (note the reversal). It should be used\n with ``imshow`` where the ``extent`` argument is ``[a, b, 1, 0]`` (again,\n note the inversion).\n \"\"\"\n\n def __init__(self, low_freq, high_freq, *args, **kwargs):\n \"\"\"\n Creates a new :class ERBFormatter: for use with ``matplotlib`` plots.\n Note that this class does not supply the ``units`` or ``places``\n arguments; typically these would be ``'Hz'`` and ``0``.\n\n :param low_freq: the low end of the gammatone filterbank frequency range\n :param high_freq: the high end of the gammatone filterbank frequency\n range\n \"\"\"\n self.low_freq = low_freq\n self.high_freq = high_freq\n super().__init__(*args, **kwargs)\n\n def _erb_axis_scale(self, fraction):\n return erb_point(self.low_freq, self.high_freq, fraction)\n\n def __call__(self, val, pos=None):\n newval = self._erb_axis_scale(val)\n return super().__call__(newval, pos)\n\n\ndef gtgram_plot(\n gtgram_function,\n axes,\n x,\n fs,\n window_time,\n hop_time,\n channels,\n f_min,\n imshow_args=None,\n):\n \"\"\"\n Plots a spectrogram-like time frequency magnitude array based on gammatone\n subband filters.\n\n :param gtgram_function: A function with signature::\n\n fft_gtgram(\n wave,\n fs,\n window_time, hop_time,\n channels,\n f_min)\n\n See :func:`gammatone.gtgram.gtgram` for details of the paramters.\n \"\"\"\n # Set a nice formatter for the y-axis\n formatter = ERBFormatter(f_min, fs / 2, unit=\"Hz\", places=0)\n axes.yaxis.set_major_formatter(formatter)\n\n # Figure out time axis scaling\n duration = len(x) / fs\n\n # Calculate 1:1 aspect ratio\n aspect_ratio = duration / scipy.constants.golden\n\n gtg = gtgram_function(x, fs, window_time, hop_time, channels, f_min)\n Z = np.flipud(20 * np.log10(gtg))\n\n return axes.imshow(Z, extent=[0, duration, 1, 0], aspect=aspect_ratio)\n\n\n# Entry point for CLI script\n\nHELP_TEXT = \"\"\"\\\nPlots the gammatone filterbank analysis of a WAV file.\n\nIf the file contains more than one channel, all channels are averaged before\nperforming analysis.\n\"\"\"\n\n\ndef render_audio_from_file(path, duration, function):\n \"\"\"\n Renders the given ``duration`` of audio from the audio file at 
``path``\n using the gammatone spectrogram function ``function``.\n \"\"\"\n samplerate, data = scipy.io.wavfile.read(path)\n\n # Average the stereo signal\n if duration:\n nframes = duration * samplerate\n data = data[0:nframes, :]\n\n signal = data.mean(1)\n\n # Default gammatone-based spectrogram parameters\n twin = 0.08\n thop = twin / 2\n channels = 1024\n fmin = 20\n\n # Set up the plot\n fig = matplotlib.pyplot.figure()\n axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])\n\n gtgram_plot(function, axes, signal, samplerate, twin, thop, channels, fmin)\n\n axes.set_title(os.path.basename(path))\n axes.set_xlabel(\"Time (s)\")\n axes.set_ylabel(\"Frequency\")\n\n matplotlib.pyplot.show()\n\n\ndef main():\n \"\"\"\n Entry point for CLI application to plot gammatonegrams of sound files.\n \"\"\"\n parser = argparse.ArgumentParser(description=HELP_TEXT)\n\n parser.add_argument(\n \"sound_file\",\n help=\"The sound file to graph. See the help text for supported formats.\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--duration\",\n type=int,\n help=\"The time in seconds from the start of the audio to use for the \"\n \"graph (default is to use the whole file).\",\n )\n\n parser.add_argument(\n \"-a\",\n \"--accurate\",\n action=\"store_const\",\n dest=\"function\",\n const=gammatone.gtgram.gtgram,\n default=gammatone.fftweight.fft_gtgram,\n help=\"Use the full filterbank approach instead of the weighted FFT \"\n \"approximation. This is much slower, and uses a lot of memory, but\"\n \" is more accurate.\",\n )\n\n args = parser.parse_args()\n\n return render_audio_from_file(args.sound_file, args.duration, args.function)\n" }, { "alpha_fraction": 0.58370041847229, "alphanum_fraction": 0.5930616855621338, "avg_line_length": 27.809524536132812, "blob_id": "f3df43fb6b4e011201ffa9483ae12ffb1e9850de", "content_id": "bd3107eac530ad98eaab1245588cf509c6457b65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1816, "license_type": "permissive", "max_line_length": 80, "num_lines": 63, "path": "/tests/test_erb_space.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n# \n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.filters\n\nREF_DATA_FILENAME = 'data/test_erbspace_data.mat'\n\nINPUT_KEY = 'erbspace_inputs'\nRESULT_KEY = 'erbspace_results'\n\nINPUT_COLS = ('f_low', 'f_high', 'num_f')\nRESULT_COLS = ('cfs',)\n\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n \n zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])\n \n for inputs, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))\n ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))\n yield (input_dict, ref_dict)\n \n\ndef test_ERB_space_known_values():\n for inputs, refs in load_reference_data():\n args = (\n inputs['f_low'],\n inputs['f_high'],\n inputs['num_f'],\n )\n \n expected = (refs['cfs'],)\n \n yield ERBSpaceTester(args, expected)\n\n\nclass ERBSpaceTester:\n \n def __init__(self, args, expected):\n self.args = args\n self.expected = expected[0]\n self.description = (\n \"ERB 
space for {:.1f} {:.1f} {:d}\".format(\n float(self.args[0]),\n float(self.args[1]),\n int(self.args[2]),\n )\n )\n \n def __call__(self):\n result = gammatone.filters.erb_space(*self.args)\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-10)\n\n" }, { "alpha_fraction": 0.6248080134391785, "alphanum_fraction": 0.6363286972045898, "avg_line_length": 31.936708450317383, "blob_id": "bbdd04056f9c51842e50be0f03335bf4585298f0", "content_id": "7f48661bda2479089518797b7144c46f1883f9e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2604, "license_type": "permissive", "max_line_length": 80, "num_lines": 79, "path": "/tests/test_fft_weights.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n# \n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nfrom __future__ import division\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.fftweight\n\nREF_DATA_FILENAME = 'data/test_fft2gtmx_data.mat'\n\nINPUT_KEY = 'fft2gtmx_inputs'\nRESULT_KEY = 'fft2gtmx_results'\n\nINPUT_COLS = ('nfft', 'sr', 'nfilts', 'width', 'fmin', 'fmax', 'maxlen')\nRESULT_COLS = ('weights', 'gain',)\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n \n zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])\n \n for inputs, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))\n ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))\n yield (input_dict, ref_dict)\n\n\ndef fft_weights_funcs(args, expected):\n \"\"\"\n Construct a pair of unit tests for the gains and weights of the FFT to\n gammatonegram calculation. 
Returns two functions: test_gains, test_weights.\n \"\"\"\n args = list(args)\n expected_weights = expected[0]\n expected_gains = expected[1]\n \n # Convert nfft, nfilts, maxlen to ints\n args[0] = int(args[0])\n args[2] = int(args[2])\n args[6] = int(args[6])\n \n weights, gains = gammatone.fftweight.fft_weights(*args)\n \n (test_weights_desc, test_gains_desc) = (\n \"FFT weights {:s} for nfft = {:d}, fs = {:d}, nfilts = {:d}\".format(\n label,\n int(args[0]),\n int(args[1]),\n int(args[2]),\n ) for label in (\"weights\", \"gains\"))\n \n def test_gains():\n assert gains.shape == expected_gains.shape \n assert np.allclose(gains, expected_gains, rtol=1e-6, atol=1e-12)\n \n def test_weights():\n assert weights.shape == expected_weights.shape\n assert np.allclose(weights, expected_weights, rtol=1e-6, atol=1e-12)\n \n test_gains.description = test_gains_desc\n test_weights.description = test_weights_desc\n \n return test_gains, test_weights\n\n\ndef test_fft_weights():\n for inputs, refs in load_reference_data():\n args = tuple(inputs[col] for col in INPUT_COLS) \n expected = (refs['weights'], refs['gain'])\n test_gains, test_weights = fft_weights_funcs(args, expected)\n yield test_gains\n yield test_weights\n\n\n" }, { "alpha_fraction": 0.723809540271759, "alphanum_fraction": 0.7476190328598022, "avg_line_length": 34, "blob_id": "9f80c95741454e0c7ed693f01520ffadfb9d7b58", "content_id": "0a5783e990bbcf93a2f8e4c7340640e8ab79d496", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "permissive", "max_line_length": 80, "num_lines": 6, "path": "/tests/__init__.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "# Copyright 2014 Jason Heeris, [email protected]\n# \n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\n\n# Designate as module\n" }, { "alpha_fraction": 0.46368715167045593, "alphanum_fraction": 0.46368715167045593, "avg_line_length": 34.79999923706055, "blob_id": "607fca5ca292b93c7b782aaa4536ddd90695a12a", "content_id": "cfa0a578f04404ba25fffb04577fa63a29520c63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 179, "license_type": "permissive", "max_line_length": 66, "num_lines": 5, "path": "/doc/plot.rst", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": ":mod:`gammatone.plot` -- Plotting utilities for gammatone analysis\n==================================================================\n\n.. 
automodule:: gammatone.plot\n :members:\n" }, { "alpha_fraction": 0.6680983901023865, "alphanum_fraction": 0.6762983202934265, "avg_line_length": 34.56944274902344, "blob_id": "a83618fdfdb48c34690991d1f95dcd74288bd03a", "content_id": "a9ecebe9725492646cebbfdcc1fff29fc85ee8b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2561, "license_type": "permissive", "max_line_length": 85, "num_lines": 72, "path": "/gammatone/gtgram.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"\nThis module contains functions for rendering \"spectrograms\" which use gammatone\nfilterbanks instead of Fourier transforms.\n\nSee COPYING for copyright and licensing information.\n\n\"\"\"\nimport numpy as np\nfrom .filters import make_erb_filters, centre_freqs, erb_filterbank\n\n\ndef round_half_away_from_zero(num):\n \"\"\"Implement the round-half-away-from-zero rule, where fractional parts of\n 0.5 result in rounding up to the nearest positive integer for positive\n numbers, and down to the nearest negative number for negative integers.\n \"\"\"\n return np.sign(num) * np.floor(np.abs(num) + 0.5)\n\n\ndef gtgram_strides(fs, window_time, hop_time, filterbank_cols):\n \"\"\"\n Calculates the window size for a gammatonegram.\n\n @return a tuple of (window_size, hop_samples, output_columns)\n \"\"\"\n nwin = int(round_half_away_from_zero(window_time * fs))\n hop_samples = int(round_half_away_from_zero(hop_time * fs))\n columns = 1 + int(np.floor((filterbank_cols - nwin) / hop_samples))\n\n return (nwin, hop_samples, columns)\n\n\ndef gtgram_xe(wave, fs, channels, f_min, f_max):\n \"\"\"Calculate the intermediate ERB filterbank processed matrix\"\"\"\n cfs = centre_freqs(fs, channels, f_min, f_max)\n fcoefs = np.flipud(make_erb_filters(fs, cfs))\n xf = erb_filterbank(wave, fcoefs)\n xe = np.power(xf, 2)\n return xe\n\n\ndef gtgram(\n wave, fs, window_time, hop_time, channels, f_min, f_max=None, return_freqs=False\n):\n \"\"\"\n Calculate a spectrogram-like time frequency magnitude array based on\n gammatone subband filters. The waveform ``wave`` (at sample rate ``fs``) is\n passed through an multi-channel gammatone auditory model filterbank, with\n lowest frequency ``f_min`` and highest frequency ``f_max``. The outputs of\n each band then have their energy integrated over windows of ``window_time``\n seconds, advancing by ``hop_time`` secs for successive columns. 
These\n magnitudes are returned as a nonnegative real matrix with ``channels`` rows.\n\n | 2009-02-23 Dan Ellis [email protected]\n |\n | (c) 2013 Jason Heeris (Python implementation)\n \"\"\"\n xe = gtgram_xe(wave, fs, channels, f_min, f_max)\n nwin, hop_samples, ncols = gtgram_strides(fs, window_time, hop_time, xe.shape[1])\n\n y = np.zeros((channels, ncols))\n\n for cnum in range(ncols):\n segment = xe[:, cnum * hop_samples + np.arange(nwin)]\n y[:, cnum] = np.sqrt(segment.mean(1))\n\n if return_freqs:\n cfs = centre_freqs(fs, channels, f_min, f_max)\n return cfs, y\n return y\n" }, { "alpha_fraction": 0.5921741127967834, "alphanum_fraction": 0.6162376999855042, "avg_line_length": 30.649005889892578, "blob_id": "112d3374b14357afa736c5b85f54587b0b01732b", "content_id": "8452d522bc26613425d5b40dbfdcebe6ea2a688d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4779, "license_type": "permissive", "max_line_length": 87, "num_lines": 151, "path": "/gammatone/fftweight.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"\nThis module contains functions for calculating weights to approximate a\ngammatone filterbank-like \"spectrogram\" from a Fourier transform.\n\nSee COPYING for copyright and licensing information.\n\"\"\"\nfrom __future__ import division\nimport numpy as np\n\nimport gammatone.filters as filters\nimport gammatone.gtgram as gtgram\n\n\ndef specgram_window(\n nfft,\n nwin,\n):\n \"\"\"\n Window calculation used in specgram replacement function. Hann window of\n width `nwin` centred in an array of width `nfft`.\n \"\"\"\n halflen = nwin // 2\n halff = nfft // 2 # midpoint of win\n acthalflen = int(np.floor(min(halff, halflen)))\n halfwin = 0.5 * (1 + np.cos(np.pi * np.arange(0, halflen + 1) / halflen))\n win = np.zeros((nfft,))\n win[halff : halff + acthalflen] = halfwin[0:acthalflen]\n win[halff : halff - acthalflen : -1] = halfwin[0:acthalflen]\n return win\n\n\ndef specgram(x, n, sr, w, h):\n \"\"\"Substitute for Matlab's specgram, calculates a simple spectrogram.\n\n :param x: The signal to analyse\n :param n: The FFT length\n :param sr: The sampling rate\n :param w: The window length (see :func:`specgram_window`)\n :param h: The hop size (must be greater than zero)\n \"\"\"\n # Based on Dan Ellis' myspecgram.m,v 1.1 2002/08/04\n assert h > 0, \"Must have a hop size greater than 0\"\n\n s = x.shape[0]\n win = specgram_window(n, w)\n\n c = 0\n\n # pre-allocate output array\n ncols = 1 + int(np.floor((s - n) / h))\n d = np.zeros(((1 + n // 2), ncols), np.dtype(complex))\n\n for b in range(0, s - n, h):\n u = win * x[b : b + n]\n t = np.fft.fft(u)\n d[:, c] = t[0 : (1 + n // 2)].T\n c = c + 1\n\n return d\n\n\ndef fft_weights(nfft, fs, nfilts, width, fmin, fmax, maxlen):\n \"\"\"\n :param nfft: the source FFT size\n :param sr: sampling rate (Hz)\n :param nfilts: the number of output bands required (default 64)\n :param width: the constant width of each band in Bark (default 1)\n :param fmin: lower limit of frequencies (Hz)\n :param fmax: upper limit of frequencies (Hz)\n :param maxlen: number of bins to truncate the rows to\n\n :return: a tuple `weights`, `gain` with the calculated weight matrices and\n gain vectors\n\n Generate a matrix of weights to combine FFT bins into Gammatone bins.\n\n Note about `maxlen` parameter: While wts has nfft columns, the second half\n are all zero. 
Hence, aud spectrum is::\n\n fft2gammatonemx(nfft,sr)*abs(fft(xincols,nfft))\n\n `maxlen` truncates the rows to this many bins.\n\n | (c) 2004-2009 Dan Ellis [email protected] based on rastamat/audspec.m\n | (c) 2012 Jason Heeris (Python implementation)\n \"\"\"\n ucirc = np.exp(1j * 2 * np.pi * np.arange(0, nfft / 2 + 1) / nfft)[None, ...]\n\n # Common ERB filter code factored out\n cf_array = filters.erb_space(fmin, fmax, nfilts)[::-1]\n\n _, A11, A12, A13, A14, _, _, _, B2, gain = filters.make_erb_filters(\n fs, cf_array, width\n ).T\n\n A11, A12, A13, A14 = A11[..., None], A12[..., None], A13[..., None], A14[..., None]\n\n r = np.sqrt(B2)\n theta = 2 * np.pi * cf_array / fs\n pole = (r * np.exp(1j * theta))[..., None]\n\n GTord = 4\n\n weights = np.zeros((nfilts, nfft))\n\n weights[:, 0 : ucirc.shape[1]] = (\n np.abs(ucirc + A11 * fs)\n * np.abs(ucirc + A12 * fs)\n * np.abs(ucirc + A13 * fs)\n * np.abs(ucirc + A14 * fs)\n * np.abs(fs * (pole - ucirc) * (pole.conj() - ucirc)) ** (-GTord)\n / gain[..., None]\n )\n\n weights = weights[:, 0 : int(maxlen)]\n\n return weights, gain\n\n\ndef fft_gtgram(wave, fs, window_time, hop_time, channels, f_min):\n \"\"\"\n Calculate a spectrogram-like time frequency magnitude array based on\n an FFT-based approximation to gammatone subband filters.\n\n A matrix of weightings is calculated (using :func:`gtgram.fft_weights`), and\n applied to the FFT of the input signal (``wave``, using sample rate ``fs``).\n The result is an approximation of full filtering using an ERB gammatone\n filterbank (as per :func:`gtgram.gtgram`).\n\n ``f_min`` determines the frequency cutoff for the corresponding gammatone\n filterbank. ``window_time`` and ``hop_time`` (both in seconds) are the size\n and overlap of the spectrogram columns.\n\n | 2009-02-23 Dan Ellis [email protected]\n |\n | (c) 2013 Jason Heeris (Python implementation)\n \"\"\"\n width = 1 # Was a parameter in the MATLAB code\n\n nfft = int(2 ** (np.ceil(np.log2(2 * window_time * fs))))\n nwin, nhop, _ = gtgram.gtgram_strides(fs, window_time, hop_time, 0)\n\n gt_weights, _ = fft_weights(nfft, fs, channels, width, f_min, fs / 2, nfft / 2 + 1)\n\n sgram = specgram(wave, nfft, fs, nwin, nhop)\n\n result = gt_weights.dot(np.abs(sgram)) / nfft\n\n return result\n" }, { "alpha_fraction": 0.48255813121795654, "alphanum_fraction": 0.48255813121795654, "avg_line_length": 33.400001525878906, "blob_id": "5c6ed5811abda40028009dd53c623ad2b467c38e", "content_id": "f9197c30a47f9572961c218369e7e4ecfd0a72da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 172, "license_type": "permissive", "max_line_length": 61, "num_lines": 5, "path": "/doc/filters.rst", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": ":mod:`gammatone.filters` -- gammatone filterbank construction\n=============================================================\n\n.. automodule:: gammatone.filters\n :members:\n" }, { "alpha_fraction": 0.558282196521759, "alphanum_fraction": 0.5950919985771179, "avg_line_length": 12, "blob_id": "f638251f945df85e08b190c32bd1ef27e45aeac6", "content_id": "8b3e832271c66238212ba700cfc137febb46ffe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 326, "license_type": "permissive", "max_line_length": 50, "num_lines": 25, "path": "/doc/index.rst", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": ".. 
gammatone documentation master file, created by\n sphinx-quickstart on Sat Dec 8 23:21:49 2012.\n\nIndex\n=====\n\nModules\n-------\n\n.. toctree::\n :maxdepth: 2\n\n filters\n gtgram\n fftweight\n plot\n\n.. include:: details.rst\n \nIndices and tables\n------------------\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n\n" }, { "alpha_fraction": 0.4673231840133667, "alphanum_fraction": 0.6132497787475586, "avg_line_length": 28.342105865478516, "blob_id": "2c59aea65c705540bf8d1bef605d92e1dc237afc", "content_id": "80b63ade10e300938abf5cd9a4f897b64417e45d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "permissive", "max_line_length": 80, "num_lines": 38, "path": "/tests/test_cfs.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n# \n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nfrom mock import patch\n\nimport gammatone.filters\n\nEXPECTED_PARAMS = (\n ((0, 0, 0), (0, 0, 0)),\n ((22050, 100, 100), (100, 11025, 100)),\n ((44100, 100, 100), (100, 22050, 100)),\n ((44100, 100, 20), (20, 22050, 100)),\n ((88200, 100, 20), (20, 44100, 100)),\n ((22050, 100, 10), (10, 11025, 100)),\n ((22050, 1000, 100), (100, 11025, 1000)),\n ((160000, 500, 200), (200, 80000, 500)),\n)\n\n\ndef test_centre_freqs():\n for args, params in EXPECTED_PARAMS:\n yield CentreFreqsTester(args, params)\n\n\nclass CentreFreqsTester:\n\n def __init__(self, args, params):\n self.args = args\n self.params = params\n self.description = \"Centre freqs for {:g} {:d} {:g}\".format(*args)\n\n\n @patch('gammatone.filters.erb_space')\n def __call__(self, erb_space_mock):\n gammatone.filters.centre_freqs(*self.args)\n erb_space_mock.assert_called_with(*self.params)\n\n\n" }, { "alpha_fraction": 0.47398844361305237, "alphanum_fraction": 0.47398844361305237, "avg_line_length": 33.599998474121094, "blob_id": "9286a6cc150d820aeca9375ff5b93b783801fcad", "content_id": "ae4f33bac0926dae84be728d98882c6db524b1ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 173, "license_type": "permissive", "max_line_length": 62, "num_lines": 5, "path": "/doc/gtgram.rst", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": ":mod:`gammatone.gtgram` -- spectrogram-like gammatone analysis\n==============================================================\n\n.. 
automodule:: gammatone.gtgram\n :members:\n" }, { "alpha_fraction": 0.6041228175163269, "alphanum_fraction": 0.6095919013023376, "avg_line_length": 30.263158798217773, "blob_id": "5bd006435d96060aff1f41ba9b80c9df45d302c6", "content_id": "17eda5e12a3f7a739e3b67e7302c3f168f2b5dd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2377, "license_type": "permissive", "max_line_length": 88, "num_lines": 76, "path": "/tests/test_specgram.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n#\n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nfrom mock import patch\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.fftweight\n\nREF_DATA_FILENAME = 'data/test_specgram_data.mat'\n\nINPUT_KEY = 'specgram_inputs'\nMOCK_KEY = 'specgram_mocks'\nRESULT_KEY = 'specgram_results'\n\nINPUT_COLS = ('name', 'wave', 'nfft', 'fs', 'nwin', 'nhop')\nMOCK_COLS = ('window',)\nRESULT_COLS = ('res',)\n\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n\n zipped_data = zip(data[INPUT_KEY], data[MOCK_KEY], data[RESULT_KEY])\n for inputs, mocks, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, inputs))\n mock_dict = dict(zip(MOCK_COLS, mocks))\n ref_dict = dict(zip(RESULT_COLS, refs))\n\n yield (input_dict, mock_dict, ref_dict)\n\n\ndef test_specgram():\n for inputs, mocks, refs in load_reference_data():\n args = (\n inputs['nfft'],\n inputs['fs'],\n inputs['nwin'],\n inputs['nhop'],\n )\n\n yield SpecgramTester(\n inputs['name'][0],\n args,\n inputs['wave'],\n mocks['window'],\n refs['res']\n )\n\nclass SpecgramTester:\n \"\"\" Testing class for specgram replacement calculation \"\"\"\n\n def __init__(self, name, args, sig, window, expected):\n self.signal = np.asarray(sig).squeeze()\n self.expected = np.asarray(expected).squeeze()\n self.args = [int(a.squeeze()) for a in args]\n self.window = window.squeeze()\n self.description = \"Specgram for {:s}\".format(name)\n\n\n def __call__(self):\n with patch(\n 'gammatone.fftweight.specgram_window',\n return_value=self.window):\n result = gammatone.fftweight.specgram(self.signal, *self.args)\n\n max_diff = np.max(np.abs(result - self.expected))\n diagnostic = \"Maximum difference: {:6e}\".format(max_diff)\n\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12), diagnostic\n\n" }, { "alpha_fraction": 0.596500813961029, "alphanum_fraction": 0.6063422560691833, "avg_line_length": 28.967212677001953, "blob_id": "abd5a00dda84ccaf71ad224b328cea98466abaab", "content_id": "ff65632c1d58322d34c9a72afac94699bf5ea684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1829, "license_type": "permissive", "max_line_length": 80, "num_lines": 61, "path": "/tests/test_gammatone_filters.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n# \n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nimport numpy as np\nimport scipy.io\nfrom 
pkg_resources import resource_stream\n\nimport gammatone.filters\n\nREF_DATA_FILENAME = 'data/test_erb_filter_data.mat'\n\nINPUT_KEY = 'erb_filter_inputs'\nRESULT_KEY = 'erb_filter_results'\n\nINPUT_COLS = ('fs', 'cfs')\nRESULT_COLS = ('fcoefs',)\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n \n zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])\n \n for inputs, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))\n ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))\n yield (input_dict, ref_dict)\n\n\ndef test_make_ERB_filters_known_values():\n for inputs, refs in load_reference_data():\n args = (\n inputs['fs'],\n inputs['cfs'],\n )\n \n expected = (refs['fcoefs'],)\n \n yield MakeERBFiltersTester(args, expected)\n\n\nclass MakeERBFiltersTester:\n \n def __init__(self, args, expected):\n self.fs = args[0]\n self.cfs = args[1]\n self.expected = expected[0]\n self.description = (\n \"Gammatone filters for {:f}, {:.1f} ... {:.1f}\".format(\n float(self.fs),\n float(self.cfs[0]),\n float(self.cfs[-1])\n ))\n \n def __call__(self):\n result = gammatone.filters.make_erb_filters(self.fs, self.cfs)\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12)\n\n" }, { "alpha_fraction": 0.765684187412262, "alphanum_fraction": 0.7724210619926453, "avg_line_length": 38.25619888305664, "blob_id": "fd6006204dc008aef9eadc45e14632bc3b46c2e5", "content_id": "c3542da9bf6ffa31a81af2c374d5f3e1b1c338e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4756, "license_type": "permissive", "max_line_length": 72, "num_lines": 121, "path": "/doc/details.rst", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "About the Gammatone Filterbank Toolkit\n--------------------------------------\n\nSummary\n~~~~~~~\n\nThis is a port of Malcolm Slaney's and Dan Ellis' gammatone filterbank\nMATLAB code, detailed below, to Python 2 and 3 using Numpy and Scipy. It\nanalyses signals by running them through banks of gammatone filters,\nsimilar to Fourier-based spectrogram analysis.\n\n.. figure:: FurElise.png\n :align: center\n :alt: Gammatone-based spectrogram of Für Elise\n\n Gammatone-based spectrogram of Für Elise\n\nDependencies\n~~~~~~~~~~~~\n\n- numpy\n- scipy\n- nose\n- mock\n- matplotlib\n\nUsing the Code\n~~~~~~~~~~~~~~\n\nFor a demonstration, find a `.wav` file (for example,\n`Für Elise <http://heeris.id.au/samples/FurElise.wav>`_) and run::\n\n python -m gammatone FurElise.wav -d 10\n\n...to see a gammatone-gram of the first ten seconds of Beethoven's \"Für\nElise.\" If you've installed via\n``pip`` or ``setup.py install``, you should also be able to just run::\n\n gammatone FurElise.wav -d 10\n\nBasis\n~~~~~\n\nThis project is based on research into how humans perceive audio,\noriginally published by Malcolm Slaney:\n\n`Malcolm Slaney (1998) \"Auditory Toolbox Version 2\", Technical Report\n#1998-010, Interval Research Corporation,\n1998. <http://cobweb.ecn.purdue.edu/~malcolm/interval/1998-010/>`_\n\nSlaney's report describes a way of modelling how the human ear\nperceives, emphasises and separates different frequencies of sound. A\nseries of gammatone filters are constructed whose width increases with\nincreasing centre frequency, and this bank of filters is applied to a\ntime-domain signal. 
The result of this is a spectrum that should\nrepresent the human experience of sound better than, say, a\nFourier-domain spectrum would.\n\nA gammatone filter has an impulse response that is a sine wave\nmultiplied by a gamma distribution function. It is a common approach to\nmodelling the auditory system.\n\nThe gammatone filterbank approach can be considered analogous (but not\nequivalent) to a discrete Fourier transform where the frequency axis is\nlogarithmic. For example, a series of notes spaced an octave apart would\nappear to be roughly linearly spaced; or a sound that was distributed\nacross the same linear frequency range would appear to have more spread\nat lower frequencies.\n\nThe real goal of this toolkit is to allow easy computation of the\ngammatone equivalent of a spectrogram — a time-varying spectrum of\nenergy over audible frequencies based on a gammatone filterbank.\n\nSlaney demonstrated his research with an initial implementation in\nMATLAB. This implementation was later extended by Dan Ellis, who found a\nway to approximate a \"gammatone-gram\" by using the fast Fourier\ntransform. Ellis' code calculates a matrix of weights that can be\napplied to the output of a FFT so that a Fourier-based spectrogram can\neasily be transformed into such an approximation.\n\nEllis' code and documentation is here: `Gammatone-like\nspectrograms <http://labrosa.ee.columbia.edu/matlab/gammatonegram/>`_\n\nInterest\n~~~~~~~~\n\nI became interested in this because of my background in science\ncommunication and my general interest in the teaching of signal\nprocessing. I find that the spectrogram approach to visualising signals\nis adequate for illustrating abstract systems or the mathematical\nproperties of transforms, but bears little correspondence to a person's\nown experience of sound. If someone wants to see what their favourite\npiece of music \"looks like,\" a normal Fourier transform based\nspectrogram is actually quite a poor way to visualise it. Features of\nthe audio seem to be oddly spaced or unnaturally emphasised or\nde-emphasised depending on where they are in the frequency domain.\n\nThe gammatone filterbank approach seems to be closer to what someone\nmight intuitively expect a visualisation of sound to look like, and can\nhelp develop an intuition about alternative representations of signals.\n\nVerifying the port\n~~~~~~~~~~~~~~~~~~\n\nSince this is a port of existing MATLAB code, I've written tests to\nverify the Python implementation against the original code. These tests\naren't unit tests, but they do generally test single functions. Running\nthe tests has the same workflow:\n\n1. Run the scripts in the ``test_generation`` directory. This will\n create a ``.mat`` file containing test data in ``tests/data``.\n\n2. Run ``nosetest3`` in the top level directory. This will find and run\n all the tests in the ``tests`` directory.\n\nAlthough I'm usually loathe to check in generated files to version\ncontrol, I'm willing to make an exception for the ``.mat`` files\ncontaining the test data. 
My reasoning is that they represent the\ndecoupling of my code from the MATLAB code, and if the two projects were\nseparated, they would be considered a part of the Python code, not the\noriginal MATLAB code.\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5612244606018066, "avg_line_length": 20.77777862548828, "blob_id": "d3e5186509b90a864e9314dcc1fad4bd00e77abc", "content_id": "9c9d25f3424a326825f9c40ce361d3529278f79c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "permissive", "max_line_length": 51, "num_lines": 9, "path": "/gammatone/__init__.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# -*- mode: python -*-\n\"\"\"gammatone filterbank toolkit\n\nCopyright (C) 2013 Jason Heeris, <[email protected]>\nCopyright (C) 2022 Dan Meliza, Jonah Weissmann, Tyler Robbins <[email protected]>\n\n\"\"\"\n__version__ = \"0.1.1\"\n" }, { "alpha_fraction": 0.5852017998695374, "alphanum_fraction": 0.5885650515556335, "avg_line_length": 28.725000381469727, "blob_id": "06ec4030a18b0358639f73ca7b7db7fff3c15430", "content_id": "2dbede5ee7edee1d14f6ff407496fd7368f41cf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3568, "license_type": "permissive", "max_line_length": 88, "num_lines": 120, "path": "/tests/test_gammatonegram.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n#\n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nfrom mock import patch\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.gtgram\n\nREF_DATA_FILENAME = 'data/test_gammatonegram_data.mat'\n\nINPUT_KEY = 'gammatonegram_inputs'\nMOCK_KEY = 'gammatonegram_mocks'\nRESULT_KEY = 'gammatonegram_results'\n\nINPUT_COLS = ('name', 'wave', 'fs', 'twin', 'thop', 'channels', 'fmin')\nMOCK_COLS = ('erb_fb', 'erb_fb_cols')\nRESULT_COLS = ('gtgram', 'nwin', 'hopsamps', 'ncols')\n\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=True)\n\n zipped_data = zip(data[INPUT_KEY], data[MOCK_KEY], data[RESULT_KEY])\n for inputs, mocks, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, inputs))\n mock_dict = dict(zip(MOCK_COLS, mocks))\n ref_dict = dict(zip(RESULT_COLS, refs))\n yield (input_dict, mock_dict, ref_dict)\n\n\ndef test_nstrides():\n \"\"\" Test gamamtonegram stride calculations \"\"\"\n for inputs, mocks, refs in load_reference_data():\n args = (\n inputs['fs'],\n inputs['twin'],\n inputs['thop'],\n mocks['erb_fb_cols']\n )\n\n expected = (\n refs['nwin'],\n refs['hopsamps'],\n refs['ncols']\n )\n\n yield GTGramStrideTester(inputs['name'], args, expected)\n\n\nclass GTGramStrideTester:\n \"\"\" Testing class for gammatonegram stride calculation \"\"\"\n\n def __init__(self, name, inputs, expected):\n self.inputs = inputs\n self.expected = expected\n self.description = \"Gammatonegram strides for {:s}\".format(name)\n\n def __call__(self):\n results = gammatone.gtgram.gtgram_strides(*self.inputs)\n\n diagnostic = (\n \"result: {:s}, expected: {:s}\".format(\n str(results),\n str(self.expected)\n )\n )\n\n # These are 
integer values, so use direct equality\n assert results == self.expected\n\n\n# TODO: possibly mock out gtgram_strides\n\ndef test_gtgram():\n for inputs, mocks, refs in load_reference_data():\n args = (\n inputs['fs'],\n inputs['twin'],\n inputs['thop'],\n inputs['channels'],\n inputs['fmin']\n )\n\n yield GammatonegramTester(\n inputs['name'],\n args,\n inputs['wave'],\n mocks['erb_fb'],\n refs['gtgram']\n )\n\nclass GammatonegramTester:\n \"\"\" Testing class for gammatonegram calculation \"\"\"\n\n def __init__(self, name, args, sig, erb_fb_out, expected):\n self.signal = np.asarray(sig)\n self.expected = np.asarray(expected)\n self.erb_fb_out = np.asarray(erb_fb_out)\n self.args = args\n\n self.description = \"Gammatonegram for {:s}\".format(name)\n\n def __call__(self):\n with patch(\n 'gammatone.gtgram.erb_filterbank',\n return_value=self.erb_fb_out):\n\n result = gammatone.gtgram.gtgram(self.signal, *self.args)\n\n max_diff = np.max(np.abs(result - self.expected))\n diagnostic = \"Maximum difference: {:6e}\".format(max_diff)\n\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12), diagnostic\n\n" }, { "alpha_fraction": 0.6085776090621948, "alphanum_fraction": 0.6194353699684143, "avg_line_length": 29.1639347076416, "blob_id": "5f334f49f7dc853faa211e41888d5d556c4b1910", "content_id": "4aed0b1af0a297f2531e890ae66fb963b2e17b09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1842, "license_type": "permissive", "max_line_length": 80, "num_lines": 61, "path": "/tests/test_filterbank.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n# \n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.filters\n\nREF_DATA_FILENAME = 'data/test_filterbank_data.mat'\n\nINPUT_KEY = 'erb_filterbank_inputs'\nRESULT_KEY = 'erb_filterbank_results'\n\nINPUT_COLS = ('fcoefs', 'wave')\nRESULT_COLS = ('filterbank',)\n\ndef load_reference_data():\n \"\"\" Load test data generated from the reference code \"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n \n zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])\n \n for inputs, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))\n ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))\n yield (input_dict, ref_dict)\n\n\ndef test_ERB_filterbank_known_values():\n for inputs, refs in load_reference_data():\n args = (\n inputs['wave'],\n inputs['fcoefs'],\n )\n \n expected = (refs['filterbank'],)\n \n yield ERBFilterBankTester(args, expected)\n\n\nclass ERBFilterBankTester:\n \n def __init__(self, args, expected):\n self.signal = args[0]\n self.fcoefs = args[1]\n self.expected = expected[0]\n \n self.description = (\n \"Gammatone filterbank result for {:.1f} ... 
{:.1f}\".format(\n self.fcoefs[0][0],\n self.fcoefs[0][1]\n ))\n \n def __call__(self):\n result = gammatone.filters.erb_filterbank(self.signal, self.fcoefs)\n assert np.allclose(result, self.expected, rtol=1e-5, atol=1e-12)\n\n\n" }, { "alpha_fraction": 0.7619929909706116, "alphanum_fraction": 0.768660843372345, "avg_line_length": 35.99333190917969, "blob_id": "a8c2d64b4f15cd3135d16e23b6742e6e3ce53bdf", "content_id": "9c861a5f79577bd05d591219d488c10eebefa1a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5403, "license_type": "permissive", "max_line_length": 85, "num_lines": 150, "path": "/README.md", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "Gammatone Filterbank Toolkit\n============================\n\n*Utilities for analysing sound using perceptual models of human hearing.*\n\nThis is a fork of the gammatone package by Jason Heeris (2013). All\nmodifications are by Dan Meliza, Jonah Weissmann, and Tyler Robbins.\n\nSummary\n-------\n\nThis is a port of Malcolm Slaney's and Dan Ellis' gammatone filterbank MATLAB\ncode, detailed below, to Python 2 and 3 using Numpy and Scipy. It analyses signals by\nrunning them through banks of gammatone filters, similar to Fourier-based\nspectrogram analysis.\n\n![Gammatone-based spectrogram of Für Elise](doc/FurElise.png)\n\nInstallation\n------------\n\nYou can install directly from this git repository using:\n\n```text\npip install git+https://github.com/melizalab/gammatone.git\n```\n\n...or you can clone the git repository however you prefer, and do:\n\n```text\npip install .\n```\n\n...or:\n\n```text\npython setup.py install\n```\n\n...from the cloned tree.\n\n### Dependencies\n\n - numpy\n - scipy\n\n### For generating plots (optional)\n\n - matplotlib\n\n### For running tests (optional)\n\n - nose\n - mock\n\nUsing the Code\n--------------\n\nIf you want to use the CLI, make sure to enable the `plot` extra\nduring installation (e.g. `pip install .[plot]`).\n\nSee the [API documentation](http://detly.github.io/gammatone/). For a\ndemonstration, find a `.wav` file (for example,\n[Für Elise](http://heeris.id.au/samples/FurElise.wav)) and run:\n\n```text\npython -m gammatone FurElise.wav -d 10\n```\n\n...to see a gammatone-gram of the first ten seconds of the track. If you've\ninstalled via `pip` or `setup.py install`, you should also be able to just run:\n\n```text\ngammatone FurElise.wav -d 10\n```\n\nBasis\n-----\n\nThis project is based on research into how humans perceive audio, originally\npublished by Malcolm Slaney:\n\n[Malcolm Slaney (1998) \"Auditory Toolbox Version 2\", Technical Report #1998-010,\nInterval Research Corporation, 1998.](\nhttp://cobweb.ecn.purdue.edu/~malcolm/interval/1998-010/\n)\n\nSlaney's report describes a way of modelling how the human ear perceives,\nemphasises and separates different frequencies of sound. A series of gammatone\nfilters are constructed whose width increases with increasing centre frequency,\nand this bank of filters is applied to a time-domain signal. The result of this\nis a spectrum that should represent the human experience of sound better than,\nsay, a Fourier-domain spectrum would.\n\nA gammatone filter has an impulse response that is a sine wave multiplied by a\ngamma distribution function. It is a common approach to modelling the auditory\nsystem.\n\nThe gammatone filterbank approach can be considered analogous (but not\nequivalent) to a discrete Fourier transform where the frequency axis is\nlogarithmic. 
For example, a series of notes spaced an octave apart would appear\nto be roughly linearly spaced; or a sound that was distributed across the same\nlinear frequency range would appear to have more spread at lower frequencies.\n\nThe real goal of this toolkit is to allow easy computation of the gammatone\nequivalent of a spectrogram — a time-varying spectrum of energy over audible\nfrequencies based on a gammatone filterbank.\n\nSlaney demonstrated his research with an initial implementation in MATLAB. This\nimplementation was later extended by Dan Ellis, who found a way to approximate a\n\"gammatone-gram\" by using the fast Fourier transform. Ellis' code calculates a\nmatrix of weights that can be applied to the output of an FFT so that a\nFourier-based spectrogram can easily be transformed into such an approximation.\n\nEllis' code and documentation are here: [Gammatone-like spectrograms](\nhttp://labrosa.ee.columbia.edu/matlab/gammatonegram/\n)\n\nInterest\n--------\n\nI became interested in this because of my background in science communication\nand my general interest in the teaching of signal processing. I find that the\nspectrogram approach to visualising signals is adequate for illustrating\nabstract systems or the mathematical properties of transforms, but bears little\ncorrespondence to a person's own experience of sound. If someone wants to see\nwhat their favourite piece of music \"looks like,\" a normal Fourier transform\nbased spectrogram is actually quite a poor way to visualise it. Features of the\naudio seem to be oddly spaced or unnaturally emphasised or de-emphasised\ndepending on where they are in the frequency domain.\n\nThe gammatone filterbank approach seems to be closer to what someone might\nintuitively expect a visualisation of sound to look like, and can help develop\nan intuition about alternative representations of signals.\n\nVerifying the port\n------------------\n\nSince this is a port of existing MATLAB code, I've written tests to verify the\nPython implementation against the original code. These tests aren't unit tests,\nbut they do generally test single functions. Running the tests has the same\nworkflow:\n\n 1. Run the scripts in the `test_generation` directory. This will create a\n `.mat` file containing test data in `tests/data`.\n\n 2. Run `nosetests3` in the top level directory. This will find and run all the\n tests in the `tests` directory.\n\nAlthough I'm usually loath to check in generated files to version control, I'm\nwilling to make an exception for the `.mat` files containing the test data. 
My\nreasoning is that they represent the decoupling of my code from the MATLAB code,\nand if the two projects were separated, they would be considered a part of the\nPython code, not the original MATLAB code.\n" }, { "alpha_fraction": 0.6029931902885437, "alphanum_fraction": 0.6089795827865601, "avg_line_length": 31.522123336791992, "blob_id": "9b909b078573c81a6900ed4807a6c79aab193446", "content_id": "2d9f53b82c0fb7caad754f83912a8b276a7c1e4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3675, "license_type": "permissive", "max_line_length": 88, "num_lines": 113, "path": "/tests/test_fft_gtgram.py", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# Copyright 2014 Jason Heeris, [email protected]\n#\n# This file is part of the gammatone toolkit, and is licensed under the 3-clause\n# BSD license: https://github.com/detly/gammatone/blob/master/COPYING\nfrom mock import patch\nimport numpy as np\nimport scipy.io\nfrom pkg_resources import resource_stream\n\nimport gammatone.fftweight\n\nREF_DATA_FILENAME = \"data/test_fft_gammatonegram_data.mat\"\n\nINPUT_KEY = \"fft_gammatonegram_inputs\"\nMOCK_KEY = \"fft_gammatonegram_mocks\"\nRESULT_KEY = \"fft_gammatonegram_results\"\n\nINPUT_COLS = (\"name\", \"wave\", \"fs\", \"twin\", \"thop\", \"channels\", \"fmin\")\nMOCK_COLS = (\"wts\",)\nRESULT_COLS = (\"res\", \"window\", \"nfft\", \"nwin\", \"nhop\")\n\n\ndef load_reference_data():\n \"\"\"Load test data generated from the reference code\"\"\"\n # Load test data\n with resource_stream(__name__, REF_DATA_FILENAME) as test_data:\n data = scipy.io.loadmat(test_data, squeeze_me=False)\n\n zipped_data = zip(data[INPUT_KEY], data[MOCK_KEY], data[RESULT_KEY])\n for inputs, mocks, refs in zipped_data:\n input_dict = dict(zip(INPUT_COLS, inputs))\n mock_dict = dict(zip(MOCK_COLS, mocks))\n ref_dict = dict(zip(RESULT_COLS, refs))\n\n yield (input_dict, mock_dict, ref_dict)\n\n\ndef test_fft_specgram_window():\n for inputs, mocks, refs in load_reference_data():\n args = (\n refs[\"nfft\"],\n refs[\"nwin\"],\n )\n\n expected = (refs[\"window\"],)\n\n yield FFTGtgramWindowTester(inputs[\"name\"], args, expected)\n\n\nclass FFTGtgramWindowTester:\n def __init__(self, name, args, expected):\n self.nfft = int(args[0].squeeze())\n self.nwin = int(args[1].squeeze())\n self.expected = expected[0].squeeze()\n\n self.description = (\n \"FFT gammatonegram window for nfft = {:f}, nwin = {:f}\".format(\n (self.nfft), (self.nwin)\n )\n )\n\n def __call__(self):\n result = gammatone.fftweight.specgram_window(self.nfft, self.nwin)\n max_diff = np.max(np.abs(result - self.expected))\n diagnostic = \"Maximum difference: {:6e}\".format(max_diff)\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12), diagnostic\n\n\ndef test_fft_gtgram():\n for inputs, mocks, refs in load_reference_data():\n args = (\n inputs[\"fs\"],\n inputs[\"twin\"],\n inputs[\"thop\"],\n inputs[\"channels\"],\n inputs[\"fmin\"],\n )\n\n yield FFTGammatonegramTester(\n inputs[\"name\"][0],\n args,\n inputs[\"wave\"],\n mocks[\"wts\"],\n refs[\"window\"],\n refs[\"res\"],\n )\n\n\nclass FFTGammatonegramTester:\n \"\"\"Testing class for gammatonegram calculation\"\"\"\n\n def __init__(self, name, args, sig, fft_weights, window, expected):\n self.signal = np.asarray(sig).squeeze()\n self.expected = np.asarray(expected).squeeze()\n self.fft_weights = np.asarray(fft_weights)\n self.args = args\n self.window = window.squeeze()\n\n 
self.description = \"FFT gammatonegram for {:s}\".format(name)\n\n def __call__(self):\n # Note that the second return value from fft_weights isn't actually used\n with patch(\n \"gammatone.fftweight.fft_weights\", return_value=(self.fft_weights, None)\n ), patch(\"gammatone.fftweight.specgram_window\", return_value=self.window):\n\n result = gammatone.fftweight.fft_gtgram(self.signal, *self.args)\n\n max_diff = np.max(np.abs(result - self.expected))\n diagnostic = \"Maximum difference: {:6e}\".format(max_diff)\n\n assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12), diagnostic\n" }, { "alpha_fraction": 0.4727272689342499, "alphanum_fraction": 0.4727272689342499, "avg_line_length": 43, "blob_id": "feb5db0a6d5561a0611ea1c0fa374b0ce26865d4", "content_id": "2914efd4ede39b653c1823f0215c16fef3a6df7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 220, "license_type": "permissive", "max_line_length": 84, "num_lines": 5, "path": "/doc/fftweight.rst", "repo_name": "melizalab/gammatone", "src_encoding": "UTF-8", "text": ":mod:`gammatone.fftweight` -- FFT weightings for spectrogram-like gammatone analysis\n====================================================================================\n\n.. automodule:: gammatone.fftweight\n :members:\n" } ]
20
swatson080/SortingAlgorithms
https://github.com/swatson080/SortingAlgorithms
0688b0b719f7407d47b6caff26e8ab2048b24264
82a6fe128d143ff7cf9f383bf9aeb5a19b0dd11b
fece1a9294d3f76b58eed0185cc13df341266b29
refs/heads/main
2023-09-06T09:20:05.606656
2021-11-22T16:55:54
2021-11-22T16:55:54
427,555,689
0
0
null
2021-11-13T03:07:11
2021-11-19T04:48:10
2021-11-22T16:55:54
Python
[ { "alpha_fraction": 0.6154042482376099, "alphanum_fraction": 0.6294312477111816, "avg_line_length": 32.359649658203125, "blob_id": "6d4304af8805b60caf7c3e4180ff6aaa4adc5c9c", "content_id": "1bf20b1eb4c0fd11c83b613c77eb3685eaaf08eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3921, "license_type": "no_license", "max_line_length": 110, "num_lines": 114, "path": "/main.py", "repo_name": "swatson080/SortingAlgorithms", "src_encoding": "UTF-8", "text": "# Collection of sorting algorithms\r\n# TODO: Get a .csv containing unsorted integers to read in to test sorting\r\n# TODO: Make a GUI for this\r\n\r\n### THE QUICKSORT ALGORITHM -- O(nlogn) -> O(n^2) ###\r\n\r\n# partition - Defines a pivot element then rearranges list so that elements less than or equal to the pivot\r\n# are to the left of it while elements greater than or equal to it are on the right\r\n# The start and end arguments specify which part of the list to partition\r\ndef partition(list, start, end):\r\n # define pivot element as the first element in the list\r\n pivot = list[start]\r\n # the low value is set as the value of the element immediately after the pivot\r\n low = start + 1\r\n # the high value is set to end\r\n high = end\r\n\r\n while(True):\r\n\r\n # Look for an element before the low pointer that is less than the pivot\r\n while low <= high and list[high] >= pivot:\r\n high -= 1\r\n\r\n # Look for an element that is greater than the pivot\r\n while low <= high and list[low] <= pivot:\r\n low += 1\r\n\r\n # Swap the two elements if we have not gone through the list yet\r\n if low <= high:\r\n temp = list[low]\r\n list[low] = list[high]\r\n list[high] = temp\r\n\r\n # Otherwise exit the loop\r\n else:\r\n break\r\n\r\n # Now move the pivot into the correct position\r\n temp = list[start]\r\n list[start] = list[high]\r\n list[high] = temp\r\n\r\n # return the high index\r\n return high\r\n\r\n# Quicksort recursively calls itself, repeatedly breaking the list into left and right sublists\r\n# until it reaches lists of 0 or 1 elements\r\n# At that point the list is sorted\r\ndef quickSort(list, start, end):\r\n if start >= end:\r\n return\r\n\r\n p = partition(list, start, end)\r\n quickSort(list, start, p-1)\r\n quickSort(list, p+1, end)\r\n\r\n### THE MERGESORT ALGORITHM -- O(nlogn)###\r\n\r\ndef merge(list, left, right, middle):\r\n # First make copies of each array to be merged\r\n leftCopy = list[left:middle+1]\r\n rightCopy = list[middle+1:right+1]\r\n\r\n # Pointers to keep track of position in each array\r\n leftPos = 0\r\n rightPos = 0\r\n sortedIndex = left\r\n\r\n # Now go through both list copies until we run out of elements in one of them\r\n while leftPos < len(leftCopy) and rightPos < len(rightCopy):\r\n\r\n # If the left copy element is smaller, position it in the sorted array\r\n if leftCopy[leftPos] <= rightCopy[rightPos]:\r\n list[sortedIndex] = leftCopy[leftPos]\r\n leftPos += 1\r\n\r\n # If the right copy element is smaller, position it in the sorted array\r\n else:\r\n list[sortedIndex] = rightCopy[rightPos]\r\n rightPos += 1\r\n\r\n # Move the pointer for the sorted list forward\r\n sortedIndex += 1\r\n\r\n # We have now run out of elements in one of the arrays\r\n # At this point we can add all remaining elements of the leftover array in the order they are currently in\r\n while leftPos < len(leftCopy):\r\n list[sortedIndex] = leftCopy[leftPos]\r\n leftPos += 1\r\n sortedIndex += 1\r\n\r\n while rightPos < len(rightCopy):\r\n 
list[sortedIndex] = rightCopy[rightPos]\r\n rightPos += 1\r\n sortedIndex += 1\r\n\r\n# Merge sort recursively breaks the list in half until we have subarrays of one element each\r\n# Merge is then called on pairs of these arrays, and the elements are 'merged' into their sorted positions\r\ndef mergeSort(list, left, right):\r\n # Base case\r\n if left >= right:\r\n return\r\n\r\n # Recursive steps\r\n middle = (left + right) // 2\r\n mergeSort(list, left, middle)\r\n mergeSort(list, middle+1, right)\r\n merge(list, left, right, middle)\r\n\r\n# Try out your own list here!\r\ntestList = [31, 45, 2, 35, 7, 50, 100, 1, 28, 47, 30, 3, 49, 15, 21, 24, 45]\r\nprint(testList)\r\nmergeSort(testList, 0, len(testList)-1)\r\nprint(testList)\r\n\r\n\r\n" } ]
1
ShaikhAbdulRahi/My-Projects
https://github.com/ShaikhAbdulRahi/My-Projects
93e52d36d2a4784ca914d525f9607ea8a40840da
92443ea54bdba362190afe7c612dd919289a0548
317c80195ac3f59feac405377f98b48079dd0e4a
refs/heads/main
2023-02-07T16:07:30.510429
2021-01-03T14:33:58
2021-01-03T14:33:58
326,426,869
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6667225360870361, "alphanum_fraction": 0.7116400003433228, "avg_line_length": 21.98458480834961, "blob_id": "89724cb7d2e482f82bcd20456bd8a10eb87b915c", "content_id": "3907bf2b371a07d144d2d3051806ff95d79595b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11933, "license_type": "no_license", "max_line_length": 391, "num_lines": 519, "path": "/1_Case Studies_IMDb_Movie_Ratings.py", "repo_name": "ShaikhAbdulRahi/My-Projects", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# # 1.1: Read the Movies Data\n\n# In[1]:\n\n\nimport pandas as pd\n\n\n# In[2]:\n\n\nmovies=pd.read_csv(\"E:/Decode_Lectures/Case Study/Case Study_01/Movie+Assignment+Data.csv\")\nmovies\n\n\n# # 1.2: Inspect the Dataframe\n\n# In[3]:\n\n\nmovies.isna().sum() # Inspect the null values in the data frame\n\n\n# In[4]:\n\n\nmovies.shape # Inspect the dimensions of the data frame\n\n\n# In[5]:\n\n\nmovies.head() # Inspect the first few rows of the data frame\n\n\n# In[6]:\n\n\nmovies.info() # Inspect the column types and non-null counts of the data frame\n\n\n# In[7]:\n\n\nmovies.describe() # Inspect the summary statistics of the numeric columns of the data frame\n\n\n# # Task 2: Data Analysis\n\n# # 2.1: Reduce those Digits from \"budget\" & \"Gross\"\n\n# In[8]:\n\n\nmovies[\"Gross\"]=movies[\"Gross\"]/1000000\nmovies[\"budget\"]=movies[\"budget\"]/1000000\nmovies\n\n\n# # 2.2: Let's Talk Profit!\n\n# In[9]:\n\n\n# Create a new column named 'profit' as 'Gross' - 'budget'\nmovies[\"profit\"]=movies[\"Gross\"]- movies[\"budget\"]\nmovies\n\n\n# In[10]:\n\n\n#2.\tSort the dataframe using the profit column as reference\nmovies=movies.sort_values(by=\"profit\")\nmovies\n\n\n# In[11]:\n\n\n#3.\tExtract the top ten profiting movies in descending order and store them in a new dataframe - top10\nmovies=movies.sort_values(by=\"profit\",ascending=False)\nmovies\nmovies.iloc[:10,:] # all columns of top 10 rows\n\n\n# In[12]:\n\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.jointplot(\"budget\",\"profit\",movies)\nplt.show()\n\n\n# In[13]:\n\n\n# Find the movies with a negative profit and store them in a new dataframe - neg_profit\nmovies[movies[\"profit\"]<0]\n\n\n# # 2.3: The General Audience and the Critics\n\n# In[14]:\n\n\nmovies.columns\n\n\n# In[15]:\n\n\n#1.\tFirst you will notice that the MetaCritic score is on a 0-100 scale, so I need to bring it to the same 0-10 scale as IMDb_rating\nmovies[\"MetaCritic\"]=movies[\"MetaCritic\"]/10\nmovies\n\n\n# In[16]:\n\n\n#Creating a new column \"Avg_rating\" by adding the \"IMDb_rating\" & \"MetaCritic\" columns & dividing by 2\nmovies[\"Avg_rating\"]=(movies[\"IMDb_rating\"]+movies[\"MetaCritic\"])/2\n\n\n# In[17]:\n\n\n#Keep only the movies where \"IMDb_rating\" and \"MetaCritic\" differ by less than 0.5\nmovies1=movies[[\"Title\",\"IMDb_rating\",\"MetaCritic\",\"Avg_rating\"]]\nmovies1.loc[abs(movies1[\"IMDb_rating\"]-movies1[\"MetaCritic\"])<0.5]\n\n\n# In[18]:\n\n\n#Find the movies with |IMDb_rating - MetaCritic| < 0.5 and an \"Avg_rating\" of at least 8\nUniversalAcclaim=movies1.loc[movies1[\"Avg_rating\"]>=8]\nmovies1=movies1.sort_values(by=\"Avg_rating\",ascending=False)\nUniversalAcclaim\n\n\n# # 2.4: Find the Most Popular Trios - I\n\n# In[19]:\n\n\ngroup=movies.pivot_table(values=[\"actor_1_facebook_likes\",\"actor_2_facebook_likes\",\"actor_3_facebook_likes\"],\n aggfunc=\"sum\",index=[\"actor_1_name\",\"actor_2_name\",\"actor_3_name\"])\ngroup\n\n\n# In[20]:\n\n\ngroup[\"Total 
likes\"]=group[\"actor_1_facebook_likes\"]+group[\"actor_2_facebook_likes\"]+group[\"actor_3_facebook_likes\"]\ngroup\n\n\n# In[21]:\n\n\ngroup.sort_values(by=\"Total likes\",ascending=False,inplace=True)\ngroup\n\n\n# In[22]:\n\n\ngroup.reset_index(inplace=True)\ngroup\n\n\n# In[23]:\n\n\ngroup.iloc[0:5,:]\n\n\n# # 2.5: Find the Most Popular Trios - II\n\n# In[24]:\n\n\nsorted([1,5,2])\n\n\n# In[25]:\n\n\n# Your answer here (optional)\n# Print the trios in which no actor has fewer than half the Facebook likes of either of the other two\nj=0\nfor i in group[\"Total likes\"]:\n temp=sorted([group.loc[j,\"actor_1_facebook_likes\"],group.loc[j,\"actor_2_facebook_likes\"],group.loc[j,\"actor_3_facebook_likes\"]])\n if temp[0]>= temp[1]/2 and temp[0]>=temp[2]/2 and temp[1]>=temp[2]/2:\n print(sorted([group.loc[j,\"actor_1_name\"],group.loc[j,\"actor_2_name\"],group.loc[j,\"actor_3_name\"]]))\n\n j=j+1\n\n\n# # 2.6: Runtime Analysis\n\n# In[26]:\n\n\n#Plot a histogram or distplot of seaborn to find the Runtime range most of the movies fall into\nplt.hist(movies[\"Runtime\"])\nplt.show()\n\n\n# # 2.7: R-Rated Movies\n\n# In[27]:\n\n\n#Although R rated movies are restricted for under-18 viewers, check which ones received the most votes from the under-18 age group\nmovies.loc[movies[\"content_rating\"]==\"R\"].sort_values(by=\"CVotesU18\",ascending=False)[[\"Title\",\"CVotesU18\"]].head(10)\n\n\n# # 3 : Demographic analysis\n\n# In[28]:\n\n\n#1. First create a new dataframe df_by_genre that contains genre_1, genre_2, and genre_3 and all the columns related to CVotes/Votes from the movies data frame. There are 47 columns to be extracted in total.\ndf_by_genre=movies.loc[:,\"CVotes10\":\"VotesnUS\"]\ndf_by_genre[[\"genre_1\",\"genre_2\",\"genre_3\"]]=movies[[\"genre_1\",\"genre_2\",\"genre_3\"]]\n\n\n# In[29]:\n\n\ndf_by_genre\n\n\n# In[30]:\n\n\n#Add a column called cnt to the dataframe df_by_genre and initialize it to 1 (one)\ndf_by_genre[\"cnt\"]=1\ndf_by_genre\n\n\n# In[31]:\n\n\ndf_by_genre[[\"genre_1\",\"genre_2\",\"genre_3\"]]\n\n\n# In[32]:\n\n\n#3.\tGroup the dataframe df_by_genre by genre_1, genre_2 and genre_3 separately\nimport numpy as np\ndf_by_g1=df_by_genre.groupby(\"genre_1\").aggregate(np.sum)\ndf_by_g2=df_by_genre.groupby(\"genre_2\").aggregate(np.sum)\ndf_by_g3=df_by_genre.groupby(\"genre_3\").aggregate(np.sum)\n\n\n# In[33]:\n\n\ndf_by_g1\n\n\n# In[34]:\n\n\ndf_by_g2\n\n\n# In[35]:\n\n\ndf_by_g3\n\n\n# In[36]:\n\n\n#Add the three dataframes and store the result in a new dataframe named \"df_add\"\ndf_add=df_by_g1.add(df_by_g2,fill_value=0)\ndf_add=df_add.add(df_by_g3,fill_value=0)\ndf_add\n\n\n# # The column cnt on aggregation has basically kept track of the number of occurrences of each genre. Subset the genres that have more than 10 movies into a new dataframe genre_top10 based on the cnt column value.\n\n# In[37]:\n\n\ngenre_top_10=df_add.loc[df_add[\"cnt\"]>10]\ngenre_top_10\n\n\n# # Take the mean of all the numeric columns by dividing them by the column value cnt and store it back to the same dataframe.\n\n# In[38]:\n\n\ngenre_top_10.iloc[:,0:-1]=genre_top_10.iloc[:,0:-1].divide(genre_top_10[\"cnt\"],axis=0)\n\n\n# In[39]:\n\n\ngenre_top_10\n\n\n# In[40]:\n\n\n#Round off all the Votes related columns up to two digits after the decimal point.\ngenre_top_10.loc[:,\"VotesM\":\"VotesnUS\"]=round(genre_top_10.loc[:,\"VotesM\":\"VotesnUS\"],2)\n\n\n# In[41]:\n\n\ngenre_top_10\n\n\n# In[42]:\n\n\n#Convert all the CVotes related columns to integers. 
\ngenre_top_10[genre_top_10.loc[:,\"CVotes10\":\"CVotesnUS\"].columns]=genre_top_10[genre_top_10.loc[:,\"CVotes10\":\"CVotesnUS\"].columns].astype(int)\n\n\n# In[43]:\n\n\ngenre_top_10\n\n\n# In[44]:\n\n\n#Make a bar chart plotting different genres vs cnt using seaborn.\nsns.barplot(x=genre_top_10[\"cnt\"],y=genre_top_10.index)\nplt.show()\n\n\n# In[45]:\n\n\n#1.\tMake the first heatmap to see how the average number of votes of males is varying across the genres. Use seaborn heatmap for this analysis. \nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nax=sns.heatmap(genre_top_10[[\"CVotesU18M\",\"CVotes1829M\",\"CVotes3044M\",\"CVotes45AM\"]])\nplt.subplot(1,2,2)\n\nax=sns.heatmap(genre_top_10[[\"CVotesU18F\",\"CVotes1829F\",\"CVotes3044F\",\"CVotes45AF\"]])\nplt.show()\n\n\n# In[46]:\n\n\n#1.\tRepeat the two heatmaps, this time with cell annotations and a diverging colormap for easier reading. \nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nax=sns.heatmap(genre_top_10[[\"CVotesU18M\",\"CVotes1829M\",\"CVotes3044M\",\"CVotes45AM\"]],annot=True,cmap=\"coolwarm\")\nplt.subplot(1,2,2)\n\nax=sns.heatmap(genre_top_10[[\"CVotesU18F\",\"CVotes1829F\",\"CVotes3044F\",\"CVotes45AF\"]],annot=True,cmap=\"coolwarm\")\nplt.show()\n\n\n# Inferences: A few inferences that can be seen from the heatmaps above are that males have voted more than females, and Sci-Fi appears to be most popular among the 18-29 age group irrespective of their gender. What more can you infer from the two heatmaps that you have plotted? Write your three inferences/observations below:\n# \n# Inference 1: The romance genre has got the least number of votes among any age group of males, but there is no such pattern among the females.\n# Inference 2: Action seems to be the most popular genre among the under-18 males, and Animation appears to be the most popular genre among under-18 females.\n# Inference 3: The 18-29 age group seems to be the most actively voting for any genre, irrespective of gender.\n\n# In[47]:\n\n\n#2.\tMake the second pair of heatmaps to see how the average rating (the Votes columns) varies across the genres for the four age groups, with males on the left and females on the right. The annotation in each cell tells the average rating for that age-gender group. \nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nax=sns.heatmap(genre_top_10[[\"VotesU18M\",\"Votes1829M\",\"Votes3044M\",\"Votes45AM\"]],annot=True,cmap=\"coolwarm\")\nplt.subplot(1,2,2)\n\nax=sns.heatmap(genre_top_10[[\"VotesU18F\",\"Votes1829F\",\"Votes3044F\",\"Votes45AF\"]],annot=True,cmap=\"coolwarm\")\nplt.show()\n\n\n# **`Inferences:`** Sci-Fi appears to be the highest rated genre in the age group of U18 for both males and females. Also, females in this age group have rated it a bit higher than the males in the same age group. What more can you infer from the two heatmaps that you have plotted? Write your three inferences/observations below:\n# - Inference 1: The rating among males seems to be decreasing with increasing age group. 
There is a similar pattern among females but with a few exceptions.\n# - Inference 2: The crime genre has the second-highest rating among the U18 age group of both males and females, but among U18 females it has got the least rating.\n# - Inference 3: The romance genre has got the least rating among both 45-and-above males and females.\n\n# # Subtask 3.4: US vs non-US Cross Analysis\n\n# In[48]:\n\n\nmovies[\"Country\"].value_counts()\n\n\n# In[49]:\n\n\n#Creating the IFUS column\nmovies[\"IFUS\"]=movies[\"Country\"].copy()\n\n\n\n\n# In[50]:\n\n\nmovies.loc[movies[\"IFUS\"]!=\"USA\",\"IFUS\"]=\"non-USA\"\n\n\n# In[51]:\n\n\nmovies[\"IFUS\"].value_counts()\n\n\n# In[52]:\n\n\n# 1_Box plot:- CVotesUS(y) vs IFUS(x)\nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nsns.boxplot(x=\"IFUS\",y=\"CVotesUS\",data=movies)\nplt.subplot(1,2,2)\nsns.boxplot(x=\"IFUS\",y=\"CVotesnUS\",data=movies)\nplt.show()\n\n\n# Inferences: Write your two inferences/observations below:\n# \n# Inference 1: In general, US movies have got a higher number of votes from both US & non-US voters when we compare the medians of the box plots.\n# \n# Inference 2: Non-US movies have a more uniform distribution of the number of votes as compared to the US movies, which is evident from the values of the quartiles.\n\n# In[53]:\n\n\n# 2_Box plot:- VotesUS(y) vs IFUS(x)\nplt.figure(figsize=(20,10))\nplt.subplot(1,2,1)\nsns.boxplot(x=\"IFUS\",y=\"VotesUS\",data=movies)\nplt.subplot(1,2,2)\nsns.boxplot(x=\"IFUS\",y=\"VotesnUS\",data=movies)\nplt.show()\n\n\n# # Inferences: Write your two inferences/observations below:\n# \n# Inference 1: Non-US voters have rated both the US & non-US movies lower compared to the US voters, which is evident from the medians of the box plots.\n# Inference 2: US movies have received higher ratings from US voters.\n# Inference 3: Some US movies have got exceptionally high ratings from both the USA and non-US voters. There are no such extreme ratings for any of the non-US movies.\n\n# - ### Subtask 3.5: Top 1000 Voters Vs Genres\n# \n# You might have also observed the column `CVotes1000`. This column represents the top 1000 voters on IMDb and gives the count for the number of these voters who have voted for a particular movie. Let's see how these top 1000 voters have voted across the genres. \n# \n# 1. Sort the dataframe genre_top10 based on the value of `CVotes1000` in descending order.\n# \n# 2. Make a seaborn barplot for `genre` vs `CVotes1000`.\n# \n# 3. Write your inferences. 
You can also try to relate it with the heatmaps you did in the previous subtasks.\n# \n# \n# \n\n# In[57]:\n\n\n# Sorting by CVotes1000\ngenre_top_10=genre_top_10.sort_values(\"CVotes1000\",ascending=False)\ngenre_top_10\n\n\n# In[63]:\n\n\ngenre_top_10[\"CVotes1000\"]\n\n\n# In[67]:\n\n\n#Bar plot\nplt.figure(figsize=(12,5))\nsns.barplot(genre_top_10.index,genre_top_10[\"CVotes1000\"])\nplt.show()\n\n# Inferences: Write your two inferences/observations below:\n\n# Inference 1: The voting pattern here almost resembles the pattern in the age group vs genre heat maps.\n# Inference 2: Although the drama genre has the highest number of movies, the average number of top voters who have rated it is lower as compared to other genres, which have fewer movies.\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7857142686843872, "avg_line_length": 20, "blob_id": "97bb0ea65121af42015af2ac136f9816eb39bf4e", "content_id": "2ab74c82aff08581fec1193769db6082f7d45561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 42, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "ShaikhAbdulRahi/My-Projects", "src_encoding": "UTF-8", "text": "# My-Projects\nThis is my first repository.\n" } ]
2
kuksss/ego2
https://github.com/kuksss/ego2
70adcdf896a64f9ec6e0be113cbbeafade5fc0bf
da90c925d0a8043a8ff2a98c14090aff32a5ddd6
5f799e2106541f4ed78bd11e5610ea4c8d8c05ee
refs/heads/master
2021-06-11T02:44:16.366874
2017-01-15T18:10:08
2017-01-15T18:10:08
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6830769181251526, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 21.66666603088379, "blob_id": "f6adf5dce7424689dc27dfcb3c034af3d71ee18a", "content_id": "2f26daf9a2946a2cf99d63a7bca4253c95703417", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 325, "license_type": "no_license", "max_line_length": 75, "num_lines": 15, "path": "/README.md", "repo_name": "kuksss/ego2", "src_encoding": "UTF-8", "text": "## How to run\n\nOpen a terminal in the project directory.\n\nInstall the required packages with `$ pip install -r requirements.txt` if necessary.\n\nRun `$ python ego2.py`.\n\nGo to the web browser and open the URL: http://127.0.0.1:5000/\n\n## Requirements\n\n* GNU/Linux\n* Python 3.x\n* Python modules: networkx, flask, requests (json and webbrowser ship with the standard library)\n" }, { "alpha_fraction": 0.837837815284729, "alphanum_fraction": 0.837837815284729, "avg_line_length": 8.75, "blob_id": "6aa7ae426b23982e7ef409f706a74a2636f55b40", "content_id": "f828d131771e7ed2394393deec8d9d4f5f578a64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 37, "license_type": "no_license", "max_line_length": 10, "num_lines": 4, "path": "/requirements.txt", "repo_name": "kuksss/ego2", "src_encoding": "UTF-8", "text": "flask\nnetworkx\nrequests\n" }, { "alpha_fraction": 0.5981388688087463, "alphanum_fraction": 0.6026193499565125, "avg_line_length": 32.74418640136719, "blob_id": "abc232914914153ba98381476f87e1209dd14267", "content_id": "d1eeaa7ef00f345b55a4c904ded03a03cfdac4e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5811, "license_type": "no_license", "max_line_length": 136, "num_lines": 172, "path": "/nxego.py", "repo_name": "kuksss/ego2", "src_encoding": "UTF-8", "text": "\"\"\"\nPython module including:\n- KRS API requesting logic\n- NetworkX graph creation logic\n\"\"\"\n\nimport os\nimport networkx as nx\nfrom networkx.readwrite import json_graph\nimport requests\nimport json\nimport datetime\n\nCHILDREN_AGE_DIFFERENCE_RANGE = range(20, 35)\nSPOUSE_AGE_DIFFERENCE_RANGE = range(0, 5)\n\ndef search_people(query):\n \"\"\"\n GET request from KRS API\n search people with given name and last name\n return list of dicts of people to choose\n \"\"\"\n\n r = requests.get('https://api-v3.mojepanstwo.pl/dane/krs_osoby.json?conditions[q]='+query)\n j = r.json()\n\n people = []\n count = 1\n for person in j['Dataobject']:\n people.append({'no':count,'id':person['id'], 'name':person['data']['krs_osoby.imiona']+' '+person['data']['krs_osoby.nazwisko'],\n 'date_of_birth':person['data']['krs_osoby.data_urodzenia']})\n count = count + 1\n\n # print('People found: ')\n # for person in people:\n # print(str(person['no']) + '\\\\t' + person['name'] + ' ' + person['date_of_birth'] + '\\\\n')\n\n return people # list of dicts\n\n\ndef create_graph(person_no, people):\n \"\"\"\n Choose person's id\n GET request from KRS API\n Create NetworkX Graph from person's JSON data\n Dump JSON graph data into /static subfolder\n \"\"\"\n\n person = people[person_no-1]\n person_id = person['id'] # choose person id\n r = requests.get('https://api-v3.mojepanstwo.pl/dane/krs_osoby/' + person_id + '.json?layers[]=graph')\n person_json = r.json()\n\n people_ids = []\n\n G = nx.Graph()\n # create nodes\n for node in person_json['layers']['graph']['nodes']:\n # person node\n if 'osoba' in node['id']:\n # ego person node\n if person_json['id'] in 
node['id']:\n G.add_node(node['id'], name=node['data']['imiona'] + ' ' + node['data']['nazwisko'],\n group='ego', attributes=node['data'])\n # other person node\n else:\n people_ids.append(node['id'])\n G.add_node(node['id'], name=node['data']['imiona'] + ' ' + node['data']['nazwisko'],\n group='osoba', attributes=node['data'])\n # institution node\n elif 'podmiot' in node['id']:\n G.add_node(node['id'], name=node['data']['nazwa'], attributes=node['data'],\n group='podmiot')\n\n # create edges\n for edge in person_json['layers']['graph']['relationships']:\n G.add_edge(edge['start'], edge['end'], relation=edge['type'])\n\n relatives = get_relatives(people_ids, person_json['data']['krs_osoby.nazwisko'], person['id'])\n add_relatives_to_graph(relatives, person_json, G)\n\n # dump G graph to JSON file\n d = json_graph.node_link_data(G)\n json.dump(d, open(os.getcwd()+'/static/ego.json', 'w'), indent=4, separators=(',', ': '))\n\n\ndef get_relatives(people, ego_name, ego_id):\n\n relatives = []\n\n people_ids = list(map(lambda person_id: person_id.replace('osoba', ''), people))\n for id in people_ids:\n new_relatives = get_related_people_from_ego(id, ego_name, ego_id)\n relatives.extend(new_relatives)\n\n return relatives\n\n\ndef get_related_people_from_ego(person_id, ego_name, ego_id):\n\n r = requests.get('https://api-v3.mojepanstwo.pl/dane/krs_osoby/' + person_id + '.json?layers[]=graph')\n person_json = r.json()\n\n relatives = []\n\n for person in person_json['layers']['graph']['nodes']:\n id = person['id'].replace('osoba', '')\n if 'osoba' in person['id'] and id != ego_id:\n if match_names(ego_name, person['data']['nazwisko']):\n relatives.append(person)\n\n return relatives\n\n\ndef match_names(ego_name, person_name):\n if (ego_name == person_name or has_similar_names(ego_name, person_name) or\n name_contains(ego_name, person_name) or name_contains(person_name, ego_name)):\n return True\n else:\n return False\n\n\ndef has_similar_names(a, b):\n return a[:-1] == b[:-1]\n\ndef name_contains(name, other_name):\n if name[:-1].endswith(other_name[:-1]):\n return True\n else:\n return False\n\ndef add_relatives_to_graph(relatives, person, graph):\n\n person_graph_id = person['layers']['graph']['root']\n\n for relative in relatives:\n\n person_gender = person['data']['krs_osoby.plec']\n relative_gender = relative['data']['plec']\n person_birth_date = person['data']['krs_osoby.data_urodzenia']\n relative_birth_date = relative['data']['data_urodzenia']\n\n age_difference = get_age_difference(person_birth_date, relative_birth_date)\n\n #is spouse\n if abs(age_difference) in SPOUSE_AGE_DIFFERENCE_RANGE and person_gender != relative_gender:\n graph.add_node(relative['id'], name=relative['data']['imiona'] + ' ' + relative['data']['nazwisko'],\n group='rodzina', attributes=relative['data'])\n graph.add_edge(person_graph_id, relative['id'], relation='MOŻLIWE MAŁŻEŃSTWO')\n #is child\n elif abs(age_difference) in CHILDREN_AGE_DIFFERENCE_RANGE:\n\n graph.add_node(relative['id'], name=relative['data']['imiona'] + ' ' + relative['data']['nazwisko'],\n group='rodzina', attributes=relative['data'])\n\n if age_difference < 0:\n graph.add_edge(person_graph_id, relative['id'], relation='MOŻLIWE DZIECKO')\n else:\n graph.add_edge(person_graph_id, relative['id'], relation='MOŻLIWY RODZIC')\n\n\ndef get_age_difference(birthdate_1, birthdate_2):\n\n year1 = extract_birth_year(birthdate_1)\n year2 = extract_birth_year(birthdate_2)\n\n return year1 - year2\n\n\ndef extract_birth_year(birth_string):\n date = 
datetime.datetime.strptime(birth_string, '%Y-%m-%d').date()\n return date.year" }, { "alpha_fraction": 0.6437956094741821, "alphanum_fraction": 0.6518248319625854, "avg_line_length": 22.620689392089844, "blob_id": "224e5cee959340403fdbccc873a7b246b55f6526", "content_id": "b4aaf3d1a851ee90bcf1a94183d585573e39429f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1370, "license_type": "no_license", "max_line_length": 89, "num_lines": 58, "path": "/ego2.py", "repo_name": "kuksss/ego2", "src_encoding": "UTF-8", "text": "\"\"\"\nFlask app module - server logic\n\"\"\"\n\nimport sys\nimport webbrowser\nfrom flask import Flask, request, redirect, url_for, render_template, send_from_directory\nimport nxego\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/get_query', methods=['POST'])\ndef get_query():\n query = request.form['query']\n return redirect(url_for('show_list', query=query))\n\n\[email protected]('/show_list')\ndef show_list():\n global _people\n query = request.args['query']\n people = nxego.search_people(query)\n _people = people # save result to global variable\n return render_template('list.html', query=query, people=people)\n\n\[email protected]('/get_person', methods=['POST'])\ndef get_person():\n person_no = request.form['user_id']\n return redirect(url_for('draw_graph', person_no=person_no))\n\n\[email protected]('/draw_graph')\ndef draw_graph():\n global _people\n person_no = int(request.args['person_no'])\n nxego.create_graph(person_no, _people) # all NetworkX logic here\n return redirect(url_for('static', filename='ego.html'))\n\n\ndef main():\n global _people # global variable that stores nxego.search_people(query)\n webbrowser.open('http://127.0.0.1:5000/')\n app.run()\n\nif __name__ == '__main__':\n try:\n main()\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n raise\n" } ]
4
arome/honey-finder
https://github.com/arome/honey-finder
ff67977d47d1f422810ff4cf4fd4a33cf224aade
7438e6ef8778eca5b98c37de8a95b27bd81d5d7e
55498067b48a3799ed9d11da8077a045fa56b6c0
refs/heads/master
2021-09-10T15:44:20.444574
2018-03-28T19:13:08
2018-03-28T19:13:08
126,086,540
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.43776077032089233, "alphanum_fraction": 0.44662725925445557, "avg_line_length": 47.17948532104492, "blob_id": "7266bb26033e8b9cab18064611e9ba91b5d09724", "content_id": "390954441b42cfbdd86960e02acaa71b1225ce2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5752, "license_type": "no_license", "max_line_length": 182, "num_lines": 117, "path": "/honey-finder.py", "repo_name": "arome/honey-finder", "src_encoding": "UTF-8", "text": "import os, datetime, timeit, shutil, time, sys\r\nclear = lambda: os.system('cls')\r\n\r\n#times = '8:50'\r\n#before_time = time.mktime(datetime.datetime.strptime(date+'/'+times, \"%d/%m/%Y/%H:%M\").timetuple())\r\n\r\ndef printWelcome():\r\n welcomeMessage = '''\r\n /$$ /$$ /$$$$$$$$ /$$ /$$ \r\n | $$ | $$ | $$_____/|__/ | $$ \r\n | $$ | $$ /$$$$$$ /$$$$$$$ /$$$$$$ /$$ /$$ | $$ /$$ /$$$$$$$ /$$$$$$$ /$$$$$$ /$$$$$$ \r\n | $$$$$$$$ /$$__ $$| $$__ $$ /$$__ $$| $$ | $$ | $$$$$ | $$| $$__ $$ /$$__ $$ /$$__ $$ /$$__ $$\r\n | $$__ $$| $$ \\ $$| $$ \\ $$| $$$$$$$$| $$ | $$ | $$__/ | $$| $$ \\ $$| $$ | $$| $$$$$$$$| $$ \\__/\r\n | $$ | $$| $$ | $$| $$ | $$| $$_____/| $$ | $$ | $$ | $$| $$ | $$| $$ | $$| $$_____/| $$ \r\n | $$ | $$| $$$$$$/| $$ | $$| $$$$$$$| $$$$$$$ | $$ | $$| $$ | $$| $$$$$$$| $$$$$$$| $$ \r\n |__/ |__/ \\______/ |__/ |__/ \\_______/ \\____ $$ |__/ |__/|__/ |__/ \\_______/ \\_______/|__/ \r\n /$$ | $$ \r\n | $$$$$$/ \r\n \\______/ \r\n '''\r\n print(welcomeMessage)\r\n\r\ndef printCurrentState(dirpath, name, sdate, edate, numFiles, currentPosition, errorsFound, time):\r\n clear()\r\n if time:\r\n print(\"Scanned \" + str(numFiles) + (\" entries\" if numFiles > 1 else \" entry\")+\" in \" + \"{:10.2f}\".format(time) + \" second\" + (\"s.\" if time > 1 else \".\"))\r\n else:\r\n print(\"Scanning \" + str(numFiles) + (\" entries\" if numFiles > 1 else \" entry\") + \" in the directory (\" + dirpath + \")\")\r\n print(statusBar(currentPosition, 0 if time else 1))\r\n if not errorsFound:\r\n print(\"No entry found for \" + name + (\" between \" + sdate + \" and \" + edate if sdate is not None else \"\")) \r\n else:\r\n print(str(len(errorsFound)) + \" error\" + (\"s\" if len(errorsFound) > 1 else \"\") + \" found for \" + name + (\" between \" + sdate + \" and \" + edate if sdate is not None else \"\")) \r\n print(errorsFound)\r\n\r\ndef statusBar(num, showPercentage):\r\n bar = '['\r\n for x in range(num):\r\n bar +='#'\r\n restOfBar = 104-num\r\n if showPercentage:\r\n percentage = str(num) +'% '\r\n bar += ' '+ percentage\r\n restOfBar -= (len(percentage)+1)\r\n for x in range(restOfBar):\r\n bar += ' '\r\n bar += ']'\r\n return bar\r\n\r\ndef searchFileContent(dirpath, filename, name):\r\n is_accessible = os.access(dirpath + filename,os.F_OK) #Check that the file at this path exists and is accessible\r\n if is_accessible: #Only read the file if it is accessible\r\n with open(dirpath + filename,'r') as f:\r\n if name in f.readline(): #Only the first line of the file is checked\r\n return filename\r\n\r\ndef main():\r\n printWelcome()\r\n dirpath = \"\\\\\\\\10.104.5.45\\\\Elmah.Errors\\\\\"\r\n customDir = 1 if input(\"Do you want to use a custom directory? (y/n) : \").lower() == \"y\" else 0\r\n question = \"Which user are you looking for? : \" \r\n limitToXML = 0\r\n if customDir:\r\n dirpath = input(\"Please enter the path to your directory : \")\r\n if dirpath[-1] != \"\\\\\":\r\n dirpath += \"\\\\\"\r\n limitToXML = 1 if input(\"Only search XML files? (y/n) : \").lower() == \"y\" else 0\r\n question = \"Text you're searching for? 
: \"\r\n sbyDate = 1 if input(\"Search by date? (y/n) : \").lower() == \"y\" else 0\r\n sdate, edate = None, None\r\n if sbyDate:\r\n sdate = datetime.datetime.strptime(input(\"Enter Date in (yyyy-mm-dd) format: \"), \"%Y-%m-%d\")\r\n edate = sdate + datetime.timedelta(days=1) \r\n after_date = time.mktime(sdate.timetuple())\r\n before_date = time.mktime(edate.timetuple())\r\n name = input(question)\r\n\r\n start_time = timeit.default_timer()\r\n\r\n currentPercentage, count = 0, 0\r\n errorsFound = []\r\n try:\r\n numFiles = len(os.listdir(dirpath))\r\n fileFound = []\r\n for filename in os.listdir(dirpath):\r\n if \".\" in filename and (not limitToXML or filename.endswith(\"xml\")):\r\n if sbyDate:\r\n if os.path.getmtime(dirpath+filename) > after_date and os.path.getmtime(dirpath+filename) < before_date:\r\n filename = searchFileContent(dirpath, filename, name)\r\n else:\r\n filename = searchFileContent(dirpath, filename, name)\r\n if filename is not None:\r\n errorsFound.append(filename)\r\n if currentPercentage < int(count*100/numFiles):\r\n printCurrentState(dirpath, name, sdate, edate, numFiles, int(count*100/numFiles), errorsFound, 0)\r\n currentPercentage = count*100/numFiles\r\n count+=1\r\n\r\n end_time = timeit.default_timer()\r\n runTime = end_time - start_time\r\n printCurrentState(dirpath, name, sdate, edate, numFiles, int(count*100/numFiles)+4, errorsFound, runTime)\r\n except FileNotFoundError as err:\r\n if \"network\" in str(err):\r\n print(\"Make sure you are connected to the company's VPN.\")\r\n else:\r\n print(\"This directory is not found\")\r\n except PermissionError as perm:\r\n print(\"Missing permission to access:\", perm)\r\n raise\r\n except:\r\n print(\"Unexpected error:\", sys.exc_info()[0])\r\n raise\r\n finally:\r\n print(\"The execution will now be terminated\")\r\n\r\nif __name__ == \"__main__\":\r\n main()" } ]
1
vkvam/webvtt-py
https://github.com/vkvam/webvtt-py
9b975c552b33d978541f81d9c1a47c7831347959
502bf59a91edc20661bf87552a1d127835343aba
1e47fdfb8603e12f619fbe3e4481ecac90085fd8
refs/heads/master
2023-06-11T18:31:51.671408
2023-06-05T10:34:12
2023-06-05T10:34:12
90,861,025
0
0
MIT
2017-05-10T12:20:28
2017-05-10T12:20:30
2023-06-05T08:35:55
Python
[ { "alpha_fraction": 0.594135046005249, "alphanum_fraction": 0.6015278697013855, "avg_line_length": 36.92523193359375, "blob_id": "360104c03c991f4e44a3b586d024d39c602580cc", "content_id": "06dfa2fef6162e1855af5b96f5a622940529d198", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4058, "license_type": "permissive", "max_line_length": 116, "num_lines": 107, "path": "/webvtt/segmenter.py", "repo_name": "vkvam/webvtt-py", "src_encoding": "UTF-8", "text": "from math import ceil, floor\nfrom .generic import Caption\nfrom webvtt.main import WebVTT\nfrom concurrent import futures\nMPEGTS = 0\nSECONDS = 200 # default number of seconds per segment\n\n\nclass WebVTTSegmenter(object):\n \"\"\"\n Provides segmentation of WebVTT captions for HTTP Live Streaming (HLS).\n \"\"\"\n def __init__(self):\n self._total_segments = 0\n self._output_writer = ''\n self._seconds = 0\n self._mpegts = 0\n self._segments = []\n\n def _validate_webvtt(self, webvtt):\n # Validates that the captions is a list and all the captions are instances of Caption.\n if not isinstance(webvtt, WebVTT):\n return False\n for c in webvtt.captions:\n if not isinstance(c, Caption):\n return False\n return True\n\n def _slice_segments(self, captions):\n self._segments = [[] for _ in range(self.total_segments)]\n\n for c in captions:\n segment_index_start = int(floor(float(c.start_in_seconds) / float(self.seconds)))\n self.segments[segment_index_start].append(c)\n\n # Also include a caption in other segments based on the end time.\n segment_index_end = int(floor(float(c.end_in_seconds) / float(self.seconds)))\n if segment_index_end > segment_index_start:\n for i in range(segment_index_start + 1, segment_index_end + 1):\n self.segments[i].append(c)\n\n @staticmethod\n def _write_segment(args):\n writer, index, mpegts, captions = args\n with writer.open('fileSequence{}.webvtt'.format(index)) as f:\n f.write('WEBVTT\\n')\n f.write('X-TIMESTAMP-MAP=MPEGTS:{},LOCAL:00:00.000\\n'.format(mpegts))\n for caption in captions:\n f.write('\\n{} --> {}\\n'.format(caption.start, caption.end))\n f.writelines('{}\\n'.format(l) for l in caption.lines) # each cue line needs its own trailing newline\n\n def _write_segments(self):\n work_list = []\n for index in range(self.total_segments):\n work = (self._output_writer, index, self._mpegts, [])\n for caption in self.segments[index]:\n work[3].append(caption)\n work_list.append(work)\n\n with futures.ThreadPoolExecutor(max_workers=100) as executor:\n segmenter_result = executor.map(self._write_segment, work_list)\n executor.shutdown(wait=True)\n\n def _write_manifest(self, captions, target_seconds=SECONDS):\n with self._output_writer.open('prog_index.m3u8') as f:\n f.write('#EXTM3U\\n')\n f.write('#EXT-X-TARGETDURATION:{}\\n'.format(self.seconds))\n f.write('#EXT-X-VERSION:5\\n')\n f.write('#EXT-X-PLAYLIST-TYPE:VOD\\n')\n\n remaining_seconds = captions[-1].end_in_seconds\n\n for i in range(self.total_segments):\n segment_length = \"{0:.3f}\".format(min(target_seconds,remaining_seconds))\n f.write('#EXTINF:{0}\\n'.format(segment_length))\n f.write('fileSequence{}.webvtt\\n'.format(i))\n remaining_seconds -= target_seconds\n\n f.write('#EXT-X-ENDLIST\\n')\n\n def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS):\n \"\"\"Segments the captions based on a number of seconds.\"\"\"\n captions = WebVTT().read(webvtt).captions\n\n self._total_segments = 0 if not captions else int(ceil(float(captions[-1].end_in_seconds) / float(seconds)))\n self._output_writer = 
output\n self._seconds = seconds\n self._mpegts = mpegts\n\n self._slice_segments(captions)\n self._write_segments()\n self._write_manifest(captions, seconds)\n\n @property\n def seconds(self):\n \"\"\"Returns the number of seconds used for segmenting captions.\"\"\"\n return self._seconds\n\n @property\n def total_segments(self):\n \"\"\"Returns the total of segments.\"\"\"\n return self._total_segments\n\n @property\n def segments(self):\n \"\"\"Return the list of segments.\"\"\"\n return self._segments\n" }, { "alpha_fraction": 0.6049327254295349, "alphanum_fraction": 0.610762357711792, "avg_line_length": 27.96103858947754, "blob_id": "2fe5d74c5dd861bf37456fcadc6bd1d63ba4836e", "content_id": "976e93a6c0079949242c009cb3b2489bbe8d3930", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2230, "license_type": "permissive", "max_line_length": 108, "num_lines": 77, "path": "/webvtt/sub_io.py", "repo_name": "vkvam/webvtt-py", "src_encoding": "UTF-8", "text": "import boto3\n\nfrom webvtt.generic import GenericReader, GenericWriter\nimport os\nfrom io import StringIO\n\n\nclass FileReader(GenericReader):\n def __init__(self, filename):\n self.filename = filename\n\n def readlines(self):\n with open(self.filename) as f:\n return [line.rstrip() for line in f.readlines()]\n\n\nclass FileWriter(GenericWriter):\n def __init__(self, folder):\n output_folder = os.path.join(os.getcwd(), folder)\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n self.folder = output_folder\n\n def open(self, name):\n file_path = os.path.join(self.folder, name)\n return open(file_path, 'w')\n\n\nclass StringReader(GenericReader):\n def __init__(self, filename):\n self.content = filename\n\n def readlines(self):\n return [line.rstrip() for line in StringIO(self.content).readlines()]\n\n\nclass S3FileLike(object):\n\n def __init__(self, bucket, key, client, headers, ACL='private'):\n self.bucket, self.key, self.client = bucket, key, client\n self.content = []\n self.headers = headers\n self.ACL = ACL\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.client.Object(self.bucket, self.key).put(\n Body=''.join(self.content),\n ACL=self.ACL,\n **self.headers\n )\n\n def write(self, content):\n self.content.append(content)\n\n def writelines(self, content):\n for f in content:\n self.content.append(f+'\\n')\n\n\nclass S3ObjectWriter(GenericWriter):\n\n def __init__(self, bucket, key_prefix, s3_resource=None, ACL='private'):\n super(S3ObjectWriter, self).__init__()\n self.bucket = bucket\n self.key_prefix = key_prefix\n self.s3_resource = boto3.resource('s3') if s3_resource is None else s3_resource\n self.ACL = ACL\n\n def open(self, key, ACL=None):\n file_type = key.split(\".\")[-1]\n headers = {} if file_type not in self.type_map else self.type_map[file_type]\n if ACL is None:\n ACL = self.ACL\n return S3FileLike(self.bucket, '{}/{}'.format(self.key_prefix, key), self.s3_resource, headers, ACL)\n" }, { "alpha_fraction": 0.5390051603317261, "alphanum_fraction": 0.5733895301818848, "avg_line_length": 30.44871711730957, "blob_id": "84b9f2f2b5305c8c147f1bfa9d8a395aa212d53d", "content_id": "763878ff037556b0ffbfb0fc5d0a67a6de26ca1b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7358, "license_type": "permissive", "max_line_length": 115, "num_lines": 234, "path": "/tests/webvtt.py", "repo_name": "vkvam/webvtt-py", "src_encoding": "UTF-8", 
"text": "import os\nimport unittest\nfrom shutil import rmtree, copy\n\nfrom webvtt import WebVTT\nfrom webvtt.exceptions import MissingFilenameError, MalformedCaptionError\nfrom webvtt.main import SUPPORTED_FORMATS\nfrom webvtt.generic import Caption\n\nBASE_DIR = os.path.dirname(__file__)\nSUBTITLES_DIR = os.path.join(BASE_DIR, 'subtitles')\nOUTPUT_DIR = os.path.join(BASE_DIR, 'output')\n\n\nclass WebVTTTestCase(unittest.TestCase):\n\n def setUp(self):\n self.webvtt = WebVTT()\n\n def _get_file(self, filename):\n return os.path.join(SUBTITLES_DIR, filename)\n\n def tearDown(self):\n if os.path.exists(OUTPUT_DIR):\n rmtree(OUTPUT_DIR)\n\n def test_create_caption(self):\n caption = Caption('00:00:00.500', '00:00:07.000', ['Caption test line 1', 'Caption test line 2'])\n self.assertEqual(caption.start, '00:00:00.500')\n self.assertEqual(caption.start_in_seconds, 0.5)\n self.assertEqual(caption.end, '00:00:07.000')\n self.assertEqual(caption.end_in_seconds, 7)\n self.assertEqual(caption.lines, ['Caption test line 1', 'Caption test line 2'])\n\n def test_save_captions(self):\n os.makedirs(OUTPUT_DIR)\n copy(self._get_file('one_caption.vtt'), OUTPUT_DIR)\n\n self.webvtt.read(os.path.join(OUTPUT_DIR, 'one_caption.vtt'))\n new_caption = Caption('00:00:07.000', '00:00:11.890', ['New caption text line1', 'New caption text line2'])\n self.webvtt.captions.append(new_caption)\n self.webvtt.save()\n\n with open(os.path.join(OUTPUT_DIR, 'one_caption.vtt'), 'r', encoding='utf-8') as f:\n lines = [line.rstrip() for line in f.readlines()]\n\n expected_lines = [\n 'WEBVTT',\n '',\n '00:00:00.500 --> 00:00:07.000',\n 'Caption text #1',\n '',\n '00:00:07.000 --> 00:00:11.890',\n 'New caption text line1',\n 'New caption text line2'\n ]\n\n self.assertListEqual(lines, expected_lines)\n\n def test_srt_conversion(self):\n os.makedirs(OUTPUT_DIR)\n copy(self._get_file('one_caption.srt'), OUTPUT_DIR)\n\n self.webvtt.from_srt(os.path.join(OUTPUT_DIR, 'one_caption.srt'))\n self.webvtt.save()\n\n self.assertTrue(os.path.exists(os.path.join(OUTPUT_DIR, 'one_caption.vtt')))\n\n with open(os.path.join(OUTPUT_DIR, 'one_caption.vtt'), 'r', encoding='utf-8') as f:\n lines = [line.rstrip() for line in f.readlines()]\n\n expected_lines = [\n 'WEBVTT',\n '',\n '00:00:00.500 --> 00:00:07.000',\n 'Caption text #1',\n ]\n\n self.assertListEqual(lines, expected_lines)\n\n def test_sbv_conversion(self):\n os.makedirs(OUTPUT_DIR)\n copy(self._get_file('two_captions.sbv'), OUTPUT_DIR)\n\n self.webvtt.from_sbv(os.path.join(OUTPUT_DIR, 'two_captions.sbv'))\n self.webvtt.save()\n\n self.assertTrue(os.path.exists(os.path.join(OUTPUT_DIR, 'two_captions.vtt')))\n\n with open(os.path.join(OUTPUT_DIR, 'two_captions.vtt'), 'r', encoding='utf-8') as f:\n lines = [line.rstrip() for line in f.readlines()]\n\n expected_lines = [\n 'WEBVTT',\n '',\n '00:00:00.378 --> 00:00:11.378',\n 'Caption text #1',\n '',\n '00:00:11.378 --> 00:00:12.305',\n 'Caption text #2 (line 1)',\n 'Caption text #2 (line 2)',\n ]\n\n self.assertListEqual(lines, expected_lines)\n\n def test_save_to_other_location(self):\n target_path = os.path.join(OUTPUT_DIR, 'test_folder')\n os.makedirs(target_path)\n\n self.webvtt.read(self._get_file('one_caption.vtt')).save(target_path)\n self.assertTrue(os.path.exists(os.path.join(target_path, 'one_caption.vtt')))\n\n def test_save_specific_filename(self):\n target_path = os.path.join(OUTPUT_DIR, 'test_folder')\n os.makedirs(target_path)\n output_file = os.path.join(target_path, 'custom_name.vtt')\n\n 
self.webvtt.read(self._get_file('one_caption.vtt')).save(output_file)\n self.assertTrue(os.path.exists(output_file))\n\n def test_save_specific_filename_no_extension(self):\n target_path = os.path.join(OUTPUT_DIR, 'test_folder')\n os.makedirs(target_path)\n output_file = os.path.join(target_path, 'custom_name')\n\n self.webvtt.read(self._get_file('one_caption.vtt')).save(output_file)\n self.assertTrue(os.path.exists(os.path.join(target_path, 'custom_name.vtt')))\n\n def test_caption_timestamp_update(self):\n c = Caption('00:00:00.500', '00:00:07.000')\n c.start = '00:00:01.750'\n c.end = '00:00:08.250'\n\n self.assertEqual(c.start, '00:00:01.750')\n self.assertEqual(c.end, '00:00:08.250')\n\n def test_caption_text(self):\n c = Caption(text=['Caption line #1', 'Caption line #2'])\n self.assertEqual(\n c.text,\n 'Caption line #1\\nCaption line #2'\n )\n\n def test_caption_receive_text(self):\n c = Caption(text='Caption line #1\\nCaption line #2')\n\n self.assertEqual(\n len(c.lines),\n 2\n )\n self.assertEqual(\n c.text,\n 'Caption line #1\\nCaption line #2'\n )\n\n def test_supported_formats(self):\n self.assertListEqual(\n WebVTT().supported_formats(),\n [sf[0] for sf in SUPPORTED_FORMATS]\n )\n\n def test_update_text(self):\n c = Caption(text='Caption line #1')\n c.text = 'Caption line #1 updated'\n self.assertEqual(\n c.text,\n 'Caption line #1 updated'\n )\n\n def test_update_text_multiline(self):\n c = Caption(text='Caption line #1')\n c.text = 'Caption line #1\\nCaption line #2'\n\n self.assertEqual(\n len(c.lines),\n 2\n )\n\n self.assertEqual(\n c.text,\n 'Caption line #1\\nCaption line #2'\n )\n\n def test_update_text_wrong_type(self):\n c = Caption(text='Caption line #1')\n\n self.assertRaises(\n AttributeError,\n setattr,\n c,\n 'text',\n 123\n )\n\n def test_manipulate_lines(self):\n c = Caption(text=['Caption line #1', 'Caption line #2'])\n c.lines[0] = 'Caption line #1 updated'\n self.assertEqual(\n c.lines[0],\n 'Caption line #1 updated'\n )\n\n def test_captions(self):\n self.webvtt.read(self._get_file('sample.vtt'))\n self.assertIsInstance(self.webvtt.captions, list)\n\n def test_captions_prevent_write(self):\n self.webvtt.read(self._get_file('sample.vtt'))\n self.assertRaises(\n AttributeError,\n setattr,\n self.webvtt,\n 'captions',\n []\n )\n\n def test_sequence_iteration(self):\n self.webvtt.read(self._get_file('sample.vtt'))\n self.assertIsInstance(self.webvtt[0], Caption)\n self.assertEqual(len(self.webvtt), len(self.webvtt.captions))\n\n def test_save_no_filename(self):\n webvtt = WebVTT()\n self.assertRaises(\n MissingFilenameError,\n webvtt.save\n )\n\n def test_malformed_start_timestamp(self):\n self.assertRaises(\n MalformedCaptionError,\n Caption,\n '01:00'\n )" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7652173638343811, "avg_line_length": 22.200000762939453, "blob_id": "7788949dcd1aa64c99466754ad92e96fae93159c", "content_id": "0a5e22eeda6db8af63a87b2f20002a3812eca377", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "permissive", "max_line_length": 38, "num_lines": 5, "path": "/webvtt/__init__.py", "repo_name": "vkvam/webvtt-py", "src_encoding": "UTF-8", "text": "from .main import WebVTT\nfrom .segmenter import WebVTTSegmenter\nfrom .generic import Caption\n\n__version__ = '0.4.0'" }, { "alpha_fraction": 0.5844889283180237, "alphanum_fraction": 0.5865433812141418, "avg_line_length": 35.05555725097656, "blob_id": 
"0c2b6fcfcbb99cc9f1c90b20200667aaab3f74f5", "content_id": "c2b746d3b43e6d510d54d682e3b3236e2e80ef5c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3894, "license_type": "permissive", "max_line_length": 99, "num_lines": 108, "path": "/webvtt/main.py", "repo_name": "vkvam/webvtt-py", "src_encoding": "UTF-8", "text": "import os\nimport re\n\nfrom .parsers import WebVTTParser, SRTParser, SBVParser\nfrom webvtt.exceptions import MissingFilenameError\n\nSUPPORTED_FORMATS = (\n ('WebVTT (.vtt)', WebVTTParser), # default parser for WebVTT format\n ('SubRip (.srt)', SRTParser), # parser for SRT format\n ('YouTube SBV (.sbv)', SBVParser), # parser for YouTube SBV format\n)\n\n\nclass WebVTT(object):\n \"\"\"\n Parse captions in WebVTT format and also from other formats like SRT.\n\n To read WebVTT:\n\n WebVTT().read('captions.vtt')\n\n For other formats like SRT, use from_[format in lower case]:\n\n WebVTT().from_srt('captions.srt')\n\n A list of all supported formats is available calling supported_formats().\n \"\"\"\n\n FORMAT_EXTENSION_PATTERN = re.compile('.+\\(\\.(.+)\\)')\n\n def __init__(self):\n self._captions = []\n self.file = ''\n\n # create methods dynamically to read captions based on the supported types\n # read() is created for WebVTT and from_[FORMAT]() for the other formats.\n for name, parser_class in SUPPORTED_FORMATS:\n extension = re.match(self.FORMAT_EXTENSION_PATTERN, name).group(1)\n method_name = 'read' if parser_class is WebVTTParser else 'from_{}'.format(extension)\n\n setattr(self.__class__, method_name, self._set_reader(method_name, name, parser_class))\n\n def __len__(self):\n return len(self._captions)\n\n def __getitem__(self, index):\n return self._captions[index]\n\n def _set_reader(self, name, format_name, parser_class):\n def f(self, file):\n self.file = file\n self._captions = parser_class().read(file).captions\n return self\n\n f.__name__ = name\n if parser_class is WebVTTParser:\n f.__doc__ = 'Reads a WebVTT captions file.'\n else:\n f.__doc__ = 'Reads captions from a file in {} format.'.format(format_name)\n return f\n\n def save(self, output=''):\n \"\"\"Save the document.\n If no output is provided the file will be saved in the same location. 
Otherwise output\n can determine a target directory or file.\n \"\"\"\n if not output:\n if not self.file:\n raise MissingFilenameError\n # saving an original vtt file will overwrite the file\n # and for files read from other formats will save as vtt\n # with the same name and location\n self.file = os.path.splitext(self.file)[0] + '.vtt'\n else:\n target = os.path.join(os.getcwd(), output)\n if os.path.isdir(target):\n # if an output is provided and it is a directory\n # the file will be saved in that location with the same name\n filename = os.path.splitext(os.path.basename(self.file))[0]\n self.file = os.path.join(target, '{}.vtt'.format(filename))\n else:\n if target[-3:].lower() != 'vtt':\n target += '.vtt'\n # otherwise the file will be written in the specified location\n self.file = target\n\n with open(self.file, 'w') as f:\n f.write('WEBVTT\\n')\n for c in self._captions:\n f.write('\\n{} --> {}\\n'.format(c.start, c.end))\n f.writelines(['{}\\n'.format(l) for l in c.lines])\n\n @staticmethod\n def supported_formats():\n \"\"\"Provides a list of supported formats that this class can read from.\"\"\"\n return [f[0] for f in SUPPORTED_FORMATS]\n\n @property\n def captions(self):\n \"\"\"Returns the list of captions.\"\"\"\n return self._captions\n\n @property\n def total_length(self):\n \"\"\"Returns the total length of the captions.\"\"\"\n if not self._captions:\n return 0\n return int(self._captions[-1].end_in_seconds) - int(self._captions[0].start_in_seconds)\n" }, { "alpha_fraction": 0.567624568939209, "alphanum_fraction": 0.5750066041946411, "avg_line_length": 29.588708877563477, "blob_id": "a5ba554cbdcb82ea2d8593064cdd2cd75200eeb0", "content_id": "824dd61424c672be52f6394b8433a9634ffa28de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3793, "license_type": "permissive", "max_line_length": 112, "num_lines": 124, "path": "/webvtt/parsers.py", "repo_name": "vkvam/webvtt-py", "src_encoding": "UTF-8", "text": "import re\n\nfrom webvtt.exceptions import MalformedFileError, MalformedCaptionError\nfrom webvtt.generic import GenericParser, Caption\n\n\nclass TextBasedParser(GenericParser):\n \"\"\"\n Parser for plain text caption files.\n This is a generic class, do not use directly.\n \"\"\"\n\n TIMEFRAME_LINE_PATTERN = ''\n\n def _read_content(self, file):\n lines = file.readlines()\n\n if not lines:\n raise MalformedFileError('The file is empty.')\n\n return lines\n\n def _parse_timeframe_line(self, line):\n \"\"\"Parse timeframe line and return start and end timestamps.\"\"\"\n tf = self._validate_timeframe_line(line)\n if not tf:\n raise MalformedCaptionError('Invalid time format')\n\n return tf.group(1), tf.group(2)\n\n def _validate_timeframe_line(self, line):\n return re.match(self.TIMEFRAME_LINE_PATTERN, line)\n\n def _is_timeframe_line(self, line):\n \"\"\"\n This method returns True if the line contains the timeframes.\n To be implemented by child classes.\n \"\"\"\n return False\n\n def _should_skip_line(self, line, index, caption):\n \"\"\"\n This method returns True for a line that should be skipped.\n To be implemented by child classes.\n \"\"\"\n return False\n\n def _parse(self, lines):\n c = None\n\n for index, line in enumerate(lines):\n if self._should_skip_line(line, index, c): # allow child classes to skip lines based on the content\n continue\n\n if self._is_timeframe_line(line):\n try:\n start, end = self._parse_timeframe_line(line)\n except MalformedCaptionError as e:\n raise 
MalformedCaptionError('{} in line! {}'.format(e, index + 1))\n c = Caption(start, end)\n elif line:\n if c is None:\n raise MalformedCaptionError('Caption missing timeframe in line {}.'.format(index + 1))\n else:\n c.add_line(line)\n else:\n if c is None:\n continue\n if not c.lines:\n raise MalformedCaptionError('Caption missing text in line {}.'.format(index + 1))\n\n self.captions.append(c)\n c = None\n\n if c is not None and c.lines:\n self.captions.append(c)\n\n\nclass SRTParser(TextBasedParser):\n \"\"\"\n SRT parser.\n \"\"\"\n\n TIMEFRAME_LINE_PATTERN = re.compile('\\s*(\\d+:\\d{2},\\d{3})\\s*-->\\s*(\\d+:\\d{2},\\d{3})')\n\n def _validate(self, lines):\n if len(lines) < 2 or lines[0] != '1' or not self._validate_timeframe_line(lines[1]):\n raise MalformedFileError('The file does not have a valid format.')\n\n def _is_timeframe_line(self, line):\n return '-->' in line\n\n def _should_skip_line(self, line, index, caption):\n return caption is None and line.isdigit()\n\n\nclass WebVTTParser(SRTParser):\n \"\"\"\n WebVTT parser.\n \"\"\"\n\n TIMEFRAME_LINE_PATTERN = re.compile('\\s*((?:\\d+:){1,2}\\d{2}.\\d{3})\\s*-->\\s*((?:\\d+:){1,2}\\d{2}.\\d{3})')\n\n def _validate(self, lines):\n if 'WEBVTT' not in lines[0]:\n raise MalformedFileError('The file does not have a valid format')\n\n def _should_skip_line(self, line, index, caption):\n return index == 0 and line == 'WEBVTT'\n\n\nclass SBVParser(TextBasedParser):\n \"\"\"\n YouTube SBV parser.\n \"\"\"\n\n TIMEFRAME_LINE_PATTERN = re.compile('\\s*(\\d+:\\d{2}.\\d{3}),(\\d+:\\d{2}.\\d{3})')\n\n def _validate(self, lines):\n if not self._validate_timeframe_line(lines[0]):\n raise MalformedFileError('The file does not have a valid format')\n\n def _is_timeframe_line(self, line):\n return self._validate_timeframe_line(line)\n" } ]
6
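A usage sketch for the vkvam/webvtt-py fork above. Unlike upstream webvtt-py, this fork's `WebVTTSegmenter.segment()` writes through a writer object from `webvtt/sub_io.py` (the default `output=''` would fail at the `.open()` call), while the test suite passes plain file paths to `read()`. The file name, output directory, and 10-second duration here are illustrative assumptions:

```python
from webvtt import WebVTTSegmenter
from webvtt.sub_io import FileWriter

# Cut captions.vtt into 10-second HLS pieces: fileSequence0.webvtt,
# fileSequence1.webvtt, ... plus a prog_index.m3u8 playlist, all under
# ./segments/ (FileWriter creates the directory if it does not exist).
WebVTTSegmenter().segment('captions.vtt', output=FileWriter('segments'), seconds=10)
```

Swapping `FileWriter` for `S3ObjectWriter(bucket, key_prefix)` would write the same files to S3, which appears to be the point of this fork's `sub_io` module.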
Oliver0047/Campus-Chatbot
https://github.com/Oliver0047/Campus-Chatbot
a051b09c48153e7c22b9bd519a75c6fae0c066d0
283fdf3f6b6361e9b9e3bf973703f005a96f23a8
f49be5efbdcbb13ac80d748bc2a3abfd50000dfd
refs/heads/master
2020-03-26T17:51:10.017590
2018-08-18T06:42:35
2018-08-18T06:42:35
145,184,200
4
1
null
null
null
null
null
[ { "alpha_fraction": 0.6882640719413757, "alphanum_fraction": 0.7420538067817688, "avg_line_length": 57.42856979370117, "blob_id": "96223f0be6254145458fd98b6b471295e5b52cf6", "content_id": "ec776a9bb2cd118e17209751fab39a857400c635", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 968, "license_type": "no_license", "max_line_length": 108, "num_lines": 14, "path": "/README.md", "repo_name": "Oliver0047/Campus-Chatbot", "src_encoding": "UTF-8", "text": "# Campus-Chatbot<br>\n* data文件夹: 储存语料和词表<br>\n* model文件夹: 储存模型参数<br>\n* static文件夹: 储存网页的CSS和JS配置文件<br>\n* templates文件夹: 储存HTML网页<br>\n* 代码(基于Pytorch)<br>\n>[preprocessing.py](https://github.com/Oliver0047/Campus-Chatbot/blob/master/preprocessing.py): 中文语料预处理<br>\n>[seq2seq.py](https://github.com/Oliver0047/Campus-Chatbot/blob/master/seq2seq.py): Seq2Seq网络的训练和预测<br>\n>[html2py.py](https://github.com/Oliver0047/Campus-Chatbot/blob/master/html2py.py): 使用Flask实现域名映射和信息交互<br>\n<center>\n<img src=\"https://github.com/Oliver0047/Campus-Chatbot/blob/master/result1.JPG\" width=\"25%\" height=\"25%\" />\n<img src=\"https://github.com/Oliver0047/Campus-Chatbot/blob/master/result2.JPG\" width=\"25%\" height=\"25%\" />\n<img src=\"https://github.com/Oliver0047/Campus-Chatbot/blob/master/result3.JPG\" width=\"25%\" height=\"25%\" />\n</center>\n" }, { "alpha_fraction": 0.5213196277618408, "alphanum_fraction": 0.5272331237792969, "avg_line_length": 36.36046600341797, "blob_id": "6c7f4718f1233233be8de90a88df82a3e51c87c6", "content_id": "4089001e440c4b7f232cb5e37aa2110dd5be22d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3445, "license_type": "no_license", "max_line_length": 104, "num_lines": 86, "path": "/preprocessing.py", "repo_name": "Oliver0047/Campus-Chatbot", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n#autor:Oliver\nimport jieba\n\nclass preprocessing():\n __PAD__ = 0#填充符\n __EOS__ = 1#结束符\n __GO__ = 2#开始符\n __UNK__ = 3#未知符\n vocab = ['__PAD__', '__EOS__', '__GO__','__UNK__']\n def __init__(self):\n self.encoderFile = \"./data/question.txt\"#问题\n self.decoderFile = \"./data/answer.txt\"#回答\n self.savePath = './data/'#储存路径\n jieba.load_userdict(\"./data/supplementvocab.txt\")#选择jieba的中文分词字典\n \n def wordToVocabulary(self, originFile, vocabFile, segementFile):\n vocabulary = []\n sege = open(segementFile, \"w\",encoding='utf-8')\n with open(originFile, 'r',encoding='utf-8') as en:\n for sent in en.readlines():\n if \"enc\" in segementFile:\n words = jieba.lcut(sent.strip())#jieba分词,返回列表\n print(words)\n else:\n words = jieba.lcut(sent.strip())\n vocabulary.extend(words)#初步形成字典\n for word in words:#储存每行分词结果\n sege.write(word+\" \")\n sege.write(\"\\n\")\n sege.close()\n\n # 去重并存入词典\n vocab_file = open(vocabFile, \"w\",encoding='utf-8')\n _vocabulary = list(set(vocabulary))\n _vocabulary.sort(key=vocabulary.index)\n _vocabulary = self.vocab + _vocabulary#加入特殊符号形成最终字典\n if \"enc\" in segementFile:\n print('encode_vocab_length: ',len(_vocabulary))\n else:\n print('decode_vocab_length: ',len(_vocabulary))\n for index, word in enumerate(_vocabulary):\n vocab_file.write(word+\"\\n\")\n vocab_file.close()\n\n def toVec(self, segementFile, vocabFile, doneFile):\n word_dicts = {}\n vec = []\n with open(vocabFile, \"r\",encoding='utf-8') as dict_f:#将字典封装成索引词表\n for index, word in enumerate(dict_f.readlines()):\n word_dicts[word.strip()] = index\n\n f = open(doneFile, \"w\",encoding='utf-8')\n #如果单独或者连续输入未知符号,则回答未知符号\n if 
\"enc.vec\" in doneFile:\n f.write(\"3 3 3 3\\n\")\n f.write(\"3\\n\")\n elif \"dec.vec\" in doneFile:\n f.write(str(word_dicts.get(\"other\", 3))+\"\\n\")\n f.write(str(word_dicts.get(\"other\", 3))+\"\\n\")\n with open(segementFile, \"r\",encoding='utf-8') as sege_f:\n for sent in sege_f.readlines():\n sents = [i.strip() for i in sent.split(\" \")[:-1]]\n vec.extend(sents)\n for word in sents:\n f.write(str(word_dicts.get(word))+\" \")#将字词转为索引号\n f.write(\"\\n\")\n f.close()\n \n\n def main(self):\n # 获得字典\n self.wordToVocabulary(self.encoderFile, self.savePath+'enc.vocab', self.savePath+'enc.segement')\n self.wordToVocabulary(self.decoderFile, self.savePath+'dec.vocab', self.savePath+'dec.segement')\n # 转向量\n self.toVec(self.savePath+\"enc.segement\", \n self.savePath+\"enc.vocab\", \n self.savePath+\"enc.vec\")\n self.toVec(self.savePath+\"dec.segement\", \n self.savePath+\"dec.vocab\", \n self.savePath+\"dec.vec\")\n\n\nif __name__ == '__main__':\n pre = preprocessing()\n pre.main()\n" }, { "alpha_fraction": 0.6455696225166321, "alphanum_fraction": 0.6624472737312317, "avg_line_length": 23.517240524291992, "blob_id": "488bd557f02343c9254ef0cbfce82a1500f3cdb7", "content_id": "d62314a941f08cef5c8c2ad236c8ff4ca934e477", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "no_license", "max_line_length": 49, "num_lines": 29, "path": "/html2py.py", "repo_name": "Oliver0047/Campus-Chatbot", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n#autor:Oliver\nfrom flask import jsonify \nimport json\nfrom seq2seq import seq2seq\nfrom flask import Flask, render_template, request\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport sys\nsys.path.append('.')\nchat=seq2seq()\napp = Flask('chatbot')\[email protected]('/')\ndef gethtml(): \n chat.prepare()\n return render_template('index.html')\[email protected]('/predict',methods=['POST','GET'])\ndef predict():\n mydata = json.loads(request.get_data())\n data=mydata['question']\n try:\n \tpred=chat.predict_one(data)\n except:\n chat.prepare()\n pred=chat.predict_one(data)\n return jsonify(result=pred)\n\nif __name__=='__main__':\n app.run(host=\"0.0.0.0\",port=5010)\n" }, { "alpha_fraction": 0.5795133113861084, "alphanum_fraction": 0.5878937840461731, "avg_line_length": 39.342159271240234, "blob_id": "50d12091b7c3dbdc7c11fcd68407ce89b797b9ef", "content_id": "ecfa3a03284e0b6e86e17496fcc67bf1db456fb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21886, "license_type": "no_license", "max_line_length": 165, "num_lines": 491, "path": "/seq2seq.py", "repo_name": "Oliver0047/Campus-Chatbot", "src_encoding": "UTF-8", "text": "# -*- coding: UTF-8 -*-\n#autor:Oliver\nimport os\nimport random\nimport sys\nimport time\nimport jieba\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd import Variable\nimport sys\nsys.path.append('.')\nUSE_CUDA = torch.cuda.is_available()#如果有GPU可以使用,那么使用GPU计算\nEOS_token = 1#结束符\nSOS_token = 2#开始符\nf=open('data/enc.vocab','r',encoding='utf-8')\nenc_vocab=f.readlines()\nenc_len=len(enc_vocab)#编码表长度\nf.flush()\nf.close()\nf=open('data/dec.vocab','r',encoding='utf-8')\ndec_vocab=f.readlines()\ndec_len=len(dec_vocab)#解码表长度\nf.flush()\nf.close()\ndel(enc_vocab)#消去变量\ndel(dec_vocab)\n\nclass EncoderRNN(nn.Module):#编码器\n def __init__(self, input_size, hidden_size, n_layers=1):\n 
super(EncoderRNN, self).__init__()\n\n self.input_size = input_size#输入大小,指问句中每个字或词的索引的one-hot编码维度,即编码表的大小\n self.hidden_size = hidden_size#隐含层大小\n self.n_layers = n_layers#RNN层数\n\n self.embedding = nn.Embedding(input_size, hidden_size)#形成词向量\n self.gru = nn.GRU(hidden_size, hidden_size, n_layers)#门控循环神经网络\n\n def forward(self, word_inputs, hidden):\n seq_len = len(word_inputs)\n embedded = self.embedding(word_inputs).view(seq_len, 1, -1)\n output, hidden = self.gru(embedded, hidden)\n return output, hidden\n\n def init_hidden(self):\n hidden = Variable(torch.zeros(self.n_layers, 1, self.hidden_size))\n if USE_CUDA: hidden = hidden.cuda()\n return hidden\n\n\nclass Attn(nn.Module):#注意力机制\n def __init__(self, method, hidden_size, max_length):\n super(Attn, self).__init__()\n\n self.method = method\n self.hidden_size = hidden_size\n\n if self.method == 'general':\n self.attn = nn.Linear(self.hidden_size, hidden_size)\n\n elif self.method == 'concat':\n self.attn = nn.Linear(self.hidden_size * 2, hidden_size)\n self.other = nn.Parameter(torch.FloatTensor(1, hidden_size))\n\n def forward(self, hidden, encoder_outputs):\n seq_len = len(encoder_outputs)\n\n attn_energies = Variable(torch.zeros(seq_len)) \n if USE_CUDA: attn_energies = attn_energies.cuda()\n\n for i in range(seq_len):\n attn_energies[i] = self.score(hidden, encoder_outputs[i])#计算权重\n\n return F.softmax(attn_energies).unsqueeze(0).unsqueeze(0)#利用softmax将权重归一化\n\n def score(self, hidden, encoder_output):\n if self.method == 'dot':\n energy = torch.dot(hidden.view(-1), encoder_output.view(-1))\n return energy\n\n elif self.method == 'general':\n energy = self.attn(encoder_output)\n energy = torch.dot(hidden.view(-1), encoder_output.view(-1))#torch.dot指各个元素相乘然后相加,和numpy不同\n return energy\n\nclass AttnDecoderRNN(nn.Module):#加入了注意力机制的解码器\n def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=10):\n super(AttnDecoderRNN, self).__init__()\n\n self.attn_model = attn_model\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.n_layers = n_layers\n self.dropout_p = dropout_p\n self.max_length = max_length\n\n self.embedding = nn.Embedding(output_size, hidden_size)\n self.gru = nn.GRU(hidden_size * 2, hidden_size, n_layers, dropout=dropout_p)\n self.out = nn.Linear(hidden_size * 2, output_size)\n\n if attn_model != 'none':\n self.attn = Attn(attn_model, hidden_size, self.max_length)\n\n def forward(self, word_input, last_context, last_hidden, encoder_outputs):\n\n word_embedded = self.embedding(word_input).view(1, 1, -1) #解码器输入转词向量\n\n rnn_input = torch.cat((word_embedded, last_context.unsqueeze(0)), 2)#将词向量与上一个背景向量连接\n rnn_output, hidden = self.gru(rnn_input, last_hidden)#rnn_output相当于当下解码器输出的上下文环境\n\n attn_weights = self.attn(rnn_output.squeeze(0), encoder_outputs)#利用这个上下文环境计算新的背景向量权重\n context = attn_weights.bmm(encoder_outputs.transpose(0, 1))#形成新的背景向量\n\n rnn_output = rnn_output.squeeze(0)\n context = context.squeeze(1) \n output = F.log_softmax(self.out(torch.cat((rnn_output, context), 1)))#根绝输入输出的上下文环境计算解码器当下的输出\n return output, context, hidden, attn_weights\n\n\nclass seq2seq(nn.Module):\n def __init__(self):\n super(seq2seq, self).__init__()\n self.max_epoches = 5000#最大训练次数\n self.batch_index = 0#从第0个问答序列开始\n self.GO_token = 2\n self.EOS_token = 1\n \n self.input_size = 1500#编码器词表大小\n self.output_size = 1500#解码器词表大小\n self.hidden_size = 1024\n self.max_length = 15#句长\n self.show_epoch = 100#每训练一百次显示一次训练数据\n self.use_cuda = USE_CUDA\n self.model_path = \"./model/\"\n 
self.n_layers = 1\n self.dropout_p = 0.05\n self.beam_search = True#使用束搜索\n self.top_k = 5#选择可能性最大的5个序列\n self.alpha = 0.5#惩罚因子\n\t\n self.enc_vec = []#编码表\n self.dec_vec = []#解码表\n\n # 初始化encoder和decoder\n self.encoder = EncoderRNN(self.input_size, self.hidden_size, self.n_layers)\n self.decoder = AttnDecoderRNN('general', self.hidden_size, self.output_size, self.n_layers, self.dropout_p, self.max_length)\n\n if USE_CUDA:\n self.encoder = self.encoder.cuda()\n self.decoder = self.decoder.cuda()\n\n #设置优化器\n self.encoder_optimizer = optim.Adam(self.encoder.parameters())\n self.decoder_optimizer = optim.Adam(self.decoder.parameters())\n #设置损失函数\n self.criterion = nn.NLLLoss()\n\n def loadData(self):#导入编码数据和解码数据\n with open(\"./data/enc.vec\") as enc:\n line = enc.readline()\n while line:\n self.enc_vec.append(line.strip().split())\n line = enc.readline()\n\n with open(\"./data/dec.vec\") as dec:\n line = dec.readline()\n while line:\n self.dec_vec.append(line.strip().split())\n line = dec.readline()\n\n def next(self, batch_size, eos_token=1, go_token=2, shuffle=False):#取一份数据\n inputs = []\n targets = []\n\n if shuffle:#随机选择一行数据\n ind = random.choice(range(len(self.enc_vec)))\n enc = [self.enc_vec[ind]]\n dec = [self.dec_vec[ind]]\n else:#按顺序选择一个batch数据\n if self.batch_index+batch_size >= len(self.enc_vec):\n enc = self.enc_vec[self.batch_index:]\n dec = self.dec_vec[self.batch_index:]\n self.batch_index = 0\n else:\n enc = self.enc_vec[self.batch_index:self.batch_index+batch_size]\n dec = self.dec_vec[self.batch_index:self.batch_index+batch_size]\n self.batch_index += batch_size\n for index in range(len(enc)):\n #限制长度\n enc = enc[0][:self.max_length] if len(enc[0]) > self.max_length else enc[0]\n dec = dec[0][:self.max_length] if len(dec[0]) > self.max_length else dec[0]\n\n enc = [int(i) for i in enc]\n dec = [int(i) for i in dec]\n dec.append(eos_token)#为解码数据添加结束符\n\n inputs.append(enc)\n targets.append(dec)\n\n inputs = Variable(torch.LongTensor(inputs)).transpose(1, 0).contiguous()#封装为变量,并保证在一个内存块上\n targets = Variable(torch.LongTensor(targets)).transpose(1, 0).contiguous()\n if USE_CUDA:\n inputs = inputs.cuda()\n targets = targets.cuda()\n return inputs, targets\n\n def train(self):#训练\n self.loadData()\n try:#如果有已知模型,就在已知模型上继续训练\n self.load_state_dict(torch.load(self.model_path+'params.pkl'))\n except Exception as e:\n print(e)\n print(\"No model!\")\n loss_track = []\n\n for epoch in range(self.max_epoches):\n start = time.time()\n inputs, targets = self.next(1, shuffle=False)#取出一份数据\n loss, logits = self.step(inputs, targets, self.max_length)#返回损失值和输出\n loss_track.append(loss)\n _,v = torch.topk(logits, 1)#取出可能性最高的输出\n pre = v.cpu().data.numpy().T.tolist()[0][0]\n tar = targets.cpu().data.numpy().T.tolist()[0]\n stop = time.time()\n if epoch % self.show_epoch == 0:\n print(\"-\"*50)\n print(\"epoch:\", epoch)\n print(\" loss:\", loss)\n print(\" target:%s\\n output:%s\" % (tar, pre))\n print(\" per-time:\", (stop-start))\n torch.save(self.state_dict(), self.model_path+'params.pkl')\n\n def step(self, input_variable, target_variable, max_length):#一份数据前向传播,反向传播,参数更新\n teacher_forcing_ratio = 0.1\n clip = 5.0#梯度裁剪,防止梯度爆炸,这是RNN经常会出现的问题\n loss = 0 \n #每次训练将梯度归零\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n\n input_length = input_variable.size()[0]\n target_length = target_variable.size()[0]\n\n encoder_hidden = self.encoder.init_hidden()\n encoder_outputs, encoder_hidden = self.encoder(input_variable, encoder_hidden)#编码\n\n decoder_input = 
Variable(torch.LongTensor([[SOS_token]]))\n decoder_context = Variable(torch.zeros(1, self.decoder.hidden_size))\n decoder_hidden = encoder_hidden \n if USE_CUDA:\n decoder_input = decoder_input.cuda()\n decoder_context = decoder_context.cuda()\n\n decoder_outputs = []\n use_teacher_forcing = random.random() < teacher_forcing_ratio#随机切换方式\n use_teacher_forcing = True\n if use_teacher_forcing:#使用正确的标签数据作为下一次解码器输入\n for di in range(target_length):\n decoder_output, decoder_context, decoder_hidden, decoder_attention = self.decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)#解码\n loss += self.criterion(decoder_output, target_variable[di])#累计损失\n decoder_input = target_variable[di]\n decoder_outputs.append(decoder_output.unsqueeze(0))\n else:#使用当下解码器输出作为下一次解码器输入\n for di in range(target_length):\n decoder_output, decoder_context, decoder_hidden, decoder_attention = self.decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)\n loss += self.criterion(decoder_output, target_variable[di])\n decoder_outputs.append(decoder_output.unsqueeze(0))\n topv, topi = decoder_output.data.topk(1)\n ni = topi[0][0]\n decoder_input = Variable(torch.LongTensor([[ni]]))\n if USE_CUDA: decoder_input = decoder_input.cuda()\n if ni == EOS_token: break\n loss.backward()#梯度反向传播\n torch.nn.utils.clip_grad_norm(self.encoder.parameters(), clip)#梯度裁剪\n torch.nn.utils.clip_grad_norm(self.decoder.parameters(), clip)\n self.encoder_optimizer.step()#参数优化\n self.decoder_optimizer.step()\n decoder_outputs = torch.cat(decoder_outputs, 0)#解码器输出\n return loss.data[0] / target_length, decoder_outputs\n\n def input_deal(self, input_vec):#将编码器输入向量限制长度,并封装为变量\n inputs = []\n enc = input_vec[:self.max_length] if len(input_vec) > self.max_length else input_vec#向量限制长度\n inputs.append(enc)\n inputs = Variable(torch.LongTensor(inputs)).transpose(1, 0).contiguous()#封装为变量\n if USE_CUDA:\n inputs = inputs.cuda()\n return inputs\n \n def prepare(self):\n try:\n self.load_state_dict(torch.load(self.model_path+'params.pkl'))#如果有模型就加载\n except Exception as e:\n print(e)\n print(\"No model!\")\n # 加载字典\n self.str_to_vec = {}\n with open(\"./data/enc.vocab\") as enc_vocab:\n for index,word in enumerate(enc_vocab.readlines()):\n self.str_to_vec[word.strip()] = index\n\n self.vec_to_str = {}\n with open(\"./data/dec.vocab\") as dec_vocab:\n for index,word in enumerate(dec_vocab.readlines()):\n self.vec_to_str[index] = word.strip()\n \n \n def predict_one(self,data):\n # 字符串转向量\n segement = jieba.lcut(data.strip())\n input_vec = [self.str_to_vec.get(i, 3) for i in segement]\n input_vec = self.input_deal(input_vec)#向量处理\n\n samples = self.beamSearchDecoder(input_vec)#得到概率top5的结果\n samples.sort(key=lambda x:-x[3])\n sample=samples[0]#取出概率最大的序列结果\n outstrs = []\n for i in sample[0]:\n if i == 1:\n break\n outstrs.append(self.vec_to_str.get(i, \"Un\"))#序列转字符\n if (\"Un\" in outstrs) or (\"__UNK__\" in outstrs):\n return \"风太大,我听不见><\"\n return \"\".join(outstrs)\n \n def predict(self):#预测\n try:\n self.load_state_dict(torch.load(self.model_path+'params.pkl'))#如果有模型就加载\n except Exception as e:\n print(e)\n print(\"No model!\")\n loss_track = []\n\n # 加载字典\n str_to_vec = {}\n with open(\"./data/enc.vocab\",encoding='utf-8') as enc_vocab:\n for index,word in enumerate(enc_vocab.readlines()):\n str_to_vec[word.strip()] = index\n\n vec_to_str = {}\n with open(\"./data/dec.vocab\",encoding='utf-8') as dec_vocab:\n for index,word in enumerate(dec_vocab.readlines()):\n vec_to_str[index] = word.strip()\n\n while True:\n 
input_strs = input(\">> \")\n # 字符串转向量\n segement = jieba.lcut(input_strs)\n input_vec = [str_to_vec.get(i, 3) for i in segement]\n input_vec = self.input_deal(input_vec)#向量处理\n\n # 选择序列输出方式\n if self.beam_search:#采用beam search\n samples = self.beamSearchDecoder(input_vec)#得到概率top5的结果\n samples.sort(key=lambda x:-x[3])\n sample=samples[0]#取出概率最大的序列结果\n outstrs = []\n for i in sample[0]:\n if i == 1:\n break\n outstrs.append(vec_to_str.get(i, \"Un\"))#序列转字符\n print(\"小电 > \", \"\".join(outstrs))\n else:#普通的序列输出\n logits = self.normal_search(input_vec)#按照每个时刻选择最高概率的字符输出,得到最终序列\n _,v = torch.topk(logits, 1)\n pre = v.cpu().data.numpy().T.tolist()[0][0]\n outstrs = []\n for i in pre:\n if i == 1:\n break\n outstrs.append(vec_to_str.get(i, \"Un\"))\n print(\"小电 > \", \"\".join(outstrs))\n\n def normal_search(self, input_variable):#按照每个时刻选择最高概率的字符输出,得到最终序列\n input_length = input_variable.size()[0]\n\n encoder_hidden = self.encoder.init_hidden()\n encoder_outputs, encoder_hidden = self.encoder(input_variable, encoder_hidden)\n\n decoder_input = Variable(torch.LongTensor([[SOS_token]]))\n decoder_context = Variable(torch.zeros(1, self.decoder.hidden_size))\n decoder_hidden = encoder_hidden\n if USE_CUDA:\n decoder_input = decoder_input.cuda()\n decoder_context = decoder_context.cuda()\n decoder_outputs = []\n\n for i in range(self.max_length):\n decoder_output, decoder_context, decoder_hidden, decoder_attention = self.decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)\n decoder_outputs.append(decoder_output.unsqueeze(0))\n topv, topi = decoder_output.data.topk(1)\n ni = topi[0][0]\n decoder_input = Variable(torch.LongTensor([[ni]])) #使用当下解码器输出作为下一次解码器输入\n if USE_CUDA: decoder_input = decoder_input.cuda()\n if ni == EOS_token: break\n\n decoder_outputs = torch.cat(decoder_outputs, 0)\n return decoder_outputs\n\n def tensorToList(self, tensor):#tensor转list\n return tensor.cpu().data.numpy().tolist()[0]\n\n def beamSearchDecoder(self, input_variable):#Beam Search算法\n input_length = input_variable.size()[0]\n encoder_hidden = self.encoder.init_hidden()\n encoder_outputs, encoder_hidden = self.encoder(input_variable, encoder_hidden)\n\n decoder_input = Variable(torch.LongTensor([[SOS_token]]))\n decoder_context = Variable(torch.zeros(1, self.decoder.hidden_size))\n decoder_hidden = encoder_hidden\n if USE_CUDA:\n decoder_input = decoder_input.cuda()\n decoder_context = decoder_context.cuda()\n\n decoder_output, decoder_context, decoder_hidden, decoder_attention = self.decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)\n topk = decoder_output.data.topk(self.top_k)#输入开始符,得到前5大概率的输出字符以及对应信息\n samples = [[] for i in range(self.top_k)]\n dead_k = 0\n final_samples = []\n for index in range(self.top_k):#储存前5大概率的输出字符,以及对应的分数,背景向量等\n topk_prob = topk[0][0][index]\n topk_index = int(topk[1][0][index])\n samples[index] = [[topk_index], topk_prob, 0, 0, decoder_context, decoder_hidden, decoder_attention, encoder_outputs]\n\n for _ in range(self.max_length):\n tmp = []\n for index in range(len(samples)):\n tmp.extend(self.beamSearchInfer(samples[index], index))#对每个储存的字符序列,继续预测下一个输出字符,保留前5大概率的字符输出\n samples = []\n\n # 筛选出topk\n df = pd.DataFrame(tmp)#封装成数据帧格式\n df.columns = ['sequence', 'pre_socres', 'fin_scores', \"ave_scores\", \"decoder_context\", \"decoder_hidden\", \"decoder_attention\", \"encoder_outputs\"]\n sequence_len = df.sequence.apply(lambda x:len(x))#取出序列长度\n df['ave_scores'] = df['fin_scores'] / sequence_len#计算平均分\n df = df.sort_values('ave_scores', 
ascending=False).reset_index().drop(['index'], axis=1)#根据平均分从大到小排序\n df = df[:(self.top_k-dead_k)]#最多取5个带结束符的序列\n for index in range(len(df)):\n group = df.ix[index]#取出序列已经对应信息\n if group.tolist()[0][-1] == 1:#如果该序列的结尾是结束符\n final_samples.append(group.tolist())#那就加入最终输出序列组中\n df = df.drop([index], axis=0)#舍弃该序列\n dead_k += 1#表示需要的序列数量减一\n #print(\"drop {}, {}\".format(group.tolist()[0], dead_k))\n samples = df.values.tolist()\n if len(samples) == 0:#如果已经没有序列了,那就可以结束了\n break\n\n if len(final_samples) < self.top_k:\n final_samples.extend(samples[:(self.top_k-dead_k)])#如果最终序列的数量不够,那就取几个概率较大的补上\n return final_samples\n\n def beamSearchInfer(self, sample, k):#计算已知序列的下一个输出字符,并计算分数\n samples = []\n decoder_input = Variable(torch.LongTensor([[sample[0][-1]]]))\n if USE_CUDA:\n decoder_input = decoder_input.cuda()\n sequence, pre_scores, fin_scores, ave_scores, decoder_context, decoder_hidden, decoder_attention, encoder_outputs = sample\n decoder_output, decoder_context, decoder_hidden, decoder_attention = self.decoder(decoder_input, decoder_context, decoder_hidden, encoder_outputs)\n\n # choose topk\n topk = decoder_output.data.topk(self.top_k)\n for k in range(self.top_k):\n topk_prob = topk[0][0][k]#取出该字符概率\n topk_index = int(topk[1][0][k])#取出该字符索引\n pre_scores += topk_prob#分数累加\n fin_scores = pre_scores - (k - 1 ) * self.alpha#加入惩罚因子\n #数据更新\n samples.append([sequence+[topk_index], pre_scores, fin_scores, ave_scores, decoder_context, decoder_hidden, decoder_attention, encoder_outputs])\n return samples\n\n def retrain(self):#从头开始训练\n try:\n os.remove(self.model_path)\n except Exception as e:\n pass\n self.train()\n\nif __name__ == '__main__':\n seq = seq2seq()\n if sys.argv[1] == 'train':#训练模式\n seq.train()\n elif sys.argv[1] == 'predict':#预测模式\n seq.predict()\n elif sys.argv[1] == 'retrain':#从头开始训练\n seq.retrain()\n" } ]
4
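Once the Flask app in `html2py.py` above is running (it binds to 0.0.0.0:5010), the `/predict` route parses the raw request body with `json.loads` and answers with `jsonify(result=...)`. A minimal client sketch; the `requests` dependency and the sample question are assumptions:

```python
import requests

# Send a question to the chatbot; the handler reads request.get_data(),
# so a JSON body with a 'question' key is what it expects.
resp = requests.post('http://localhost:5010/predict', json={'question': '你好'})
print(resp.json()['result'])
```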
infogrind/massrename
https://github.com/infogrind/massrename
a5312370a67d5db53629bdb477ed6c93c3d932af
783d8e7944287390b3b126f5c2766785ce86f87b
41b6905adbb017b5604c77a895d162444eafa8a6
refs/heads/master
2020-07-01T17:16:05.828465
2016-11-20T11:00:18
2016-11-20T11:00:18
74,267,825
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5933300256729126, "alphanum_fraction": 0.5953210592269897, "avg_line_length": 23.40081024169922, "blob_id": "fab378e245d2eb015a6122c1e14f5ecbf2f80bd8", "content_id": "27d95bf732175dfb39457027a2567e614a7c1b01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6027, "license_type": "no_license", "max_line_length": 82, "num_lines": 247, "path": "/massrename", "repo_name": "infogrind/massrename", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys;\nimport os;\nimport re;\nimport getopt;\n\n# Global options.\nverbose = False # Display extra output.\nrecursive = True # Process directories recursively.\nforce = False # Don't ask for any confirmation. (Dangerous, but won't\n # overwrite existing files.\nignorecase = False # Ignore case in regular expressions?\n\n\ndef main():\n args = parse_options(sys.argv[1:])\n\n if not len(args) == 3:\n print \"Syntax error.\"\n usage()\n sys.exit(2)\n\n # The base directory.\n b = args[0]\n if not os.path.exists(b):\n sys.stderr.write(\"Directory \" + b + \" does not exist.\\n\")\n sys.exit(1)\n\n # The episode pattern and the replacement test\n if ignorecase:\n p = re.compile(args[1], re.I)\n else:\n p = re.compile(args[1]);\n r = args[2];\n\n # The actual stuff!\n mass_rename(b, p, r)\n\n\ndef usage():\n print \"\"\"Usage: massrename [options] <directory> <pattern> <replacement>\n\n Massrename renames all files in <directory> that match the regular expression\n <pattern> using the substitution string <replacement>. The substitution string\n can contain \\\\1, \\\\2, ... if there are corresponding groups in the pattern.\n\n Options:\n -h Show this help text.\n -v Display verbose output.\n -i Ignore case in regular expression.\n -r Recursive mode.\n -f Force mode: does not ask for confirmation before renaming.\n -r Recursive mode\n \"\"\"\n\n\ndef parse_options(args):\n\n # Access global variables.\n global verbose, recursive, force, ignorecase\n\n try:\n opts, args = getopt.getopt(args, \"hvrfi\")\n except getopt.GetoptError, err:\n print str(err)\n usage()\n sys.exit(2)\n\n # Defaults\n verbose = False\n recursive = False\n\n for o, a in opts:\n if o == \"-h\":\n usage()\n sys.exit();\n elif o == \"-v\":\n verbose = True\n elif o == \"-r\":\n recursive = True\n elif o == \"-f\":\n force = True\n elif o == \"-i\":\n ignorecase = True\n else:\n assert False, \"unhandled option\"\n\n return args\n\n\ndef mass_rename(b, p, r):\n # Parameters: \n # b - base directory\n # p - regular expression to match\n # r - replacement text\n\n # Create a set of the files to process.\n debug(\"Getting contents of directory \" + b)\n l = os.listdir(b);\n l.sort();\n d = set(l);\n\n # The dictionary where we put the files to rename.\n dd = {};\n\n # A flag indicating if at least one match was found.\n anymatch = False;\n\n # For each of the files in the set:\n # if it matches the pattern\n # - remove it from the set\n # - add it to renaming dict, put new name if new name is not in set\n # - put new name in set\n # else\n # - don't do anything.\n\n for f in l:\n\n # If recursive mode: If we've encountered a directory, proceed into it.\n if recursive and os.path.isdir(os.path.join(b,f)):\n subdir = os.path.join(b,f)\n debug(\"Entering directory \" + subdir)\n mass_rename(subdir, p, r)\n\n # In recursive mode, after a directory has been recursively processed, we\n # are free to rename it if it is a match itself.\n if p.match(f):\n debug(\"Match found: \" + f)\n anymatch = True\n\n # 
Create the new name.\n n = p.sub(r, f);\n\n # Remove element from the set (we will re-add it if it could not be renamed)\n d.remove(f);\n \n # If there already exists an object with the intendent new name, we cannot\n # rename it and put it back.\n if n in d:\n debug(\"Sorry, file with name \" + n + \n \" already exists. Will not rename.\");\n d.add(f);\n else:\n # Now that we have verified that we can use the new name, we put the old\n # and the new names in the dictionary, which we will use in the end to do\n # the actual renaming.\n dd[f] = n;\n \n # Also, we add the new name to the set so that future files will not be\n # renamed to the same.\n d.add(n);\n\n else:\n debug(\"No match: \" + f)\n\n # For now, let's just print the renaming that we will do.\n preview(dd);\n\n if not anymatch:\n print \"No match found in directory \", b\n elif (not force) and not confirm(prompt=\"Go ahead?\", resp=False):\n print \"Aborted.\"\n return\n\n # Now we can do the actual renaming.\n do_rename(b, dd);\n\n\ndef sorted_keys(dict):\n keys = dict.keys();\n keys.sort();\n return keys;\n\n\ndef confirm(prompt=None, resp=False):\n \"\"\"prompts for yes or no response from the user. Returns True for yes and\n False for no.\n\n 'resp' should be set to the default value assumed by the caller when\n user simply types ENTER.\n\n >>> confirm(prompt='Create Directory?', resp=True)\n Create Directory? [y]|n: \n True\n >>> confirm(prompt='Create Directory?', resp=False)\n Create Directory? [n]|y: \n False\n >>> confirm(prompt='Create Directory?', resp=False)\n Create Directory? [n]|y: y\n True\n\n \"\"\"\n \n if prompt is None:\n prompt = 'Confirm'\n\n if resp:\n prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')\n else:\n prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')\n \n while True:\n ans = raw_input(prompt)\n if not ans:\n return resp\n if ans not in ['y', 'Y', 'n', 'N']:\n print 'Please enter y or n.'\n continue\n if ans == 'y' or ans == 'Y':\n return True\n if ans == 'n' or ans == 'N':\n return False\n\n\ndef debug(str):\n if verbose:\n sys.stderr.write(str + \"\\n\");\n\n\ndef preview(dict):\n for k in sorted_keys(dict):\n print k, \"\\n -> \", dict[k]\n\n\ndef do_rename(b, dict):\n for f in sorted_keys(dict):\n old = os.path.join(b, f);\n new = os.path.join(b, dict[f]);\n debug(\"Renaming \" + old + \" -> \" + new);\n\n # If the old and the new name are the same, no need to rename anything.\n if old == new:\n debug(old + \" already has the correct name.\")\n continue\n\n # Last safety check\n if os.path.exists(new):\n print \"BAD BAD BAD! I almost tried to erase the existing file \", new, \".\"\n sys.exit(1)\n\n os.rename(old, new);\n\n\n\nif __name__ == \"__main__\":\n main();\n" }, { "alpha_fraction": 0.6993007063865662, "alphanum_fraction": 0.7027971744537354, "avg_line_length": 29.105262756347656, "blob_id": "fe2c385532377214d72fd145ea38f27074b55dd7", "content_id": "4cc1e60e2e601c51bf6fac9a1758746257c402d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 574, "license_type": "no_license", "max_line_length": 82, "num_lines": 19, "path": "/README.md", "repo_name": "infogrind/massrename", "src_encoding": "UTF-8", "text": "# massrename\n\n## Syntax:\n\n massrename [options] <directory> <pattern> <replacement>\n\nMassrename renames all files in `<directory>` that match the regular expression\n`<pattern>` using the substitution string `<replacement>`. 
The substitution string\ncan contain `\\1`, `\\2`, … if there are corresponding groups in the pattern.\n\n## Options:\n\n    Options:\n    -h    Show this help text.\n    -v    Display verbose output.\n    -i    Ignore case in regular expression.\n    -r    Recursive mode.\n    -f    Force mode: does not ask for confirmation before renaming.\n" } ]
2
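A concrete run of the massrename script above; the directory and file names are hypothetical. With a capture group in the pattern, `\1` in the replacement reuses the matched number, and the preview plus the `Go ahead?` prompt come from the script's `preview()` and `confirm()` functions (the script is Python 2, as its `print` statements show):

```
$ ./massrename episodes 'ep(\d+)\.avi' 'Episode_\1.avi'
ep1.avi
 ->  Episode_1.avi
Go ahead? [n]|y: y
```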
CintiaAssis/Fiap_ADS_ano1_fase2_cap3_atividade
https://github.com/CintiaAssis/Fiap_ADS_ano1_fase2_cap3_atividade
ff1a9fbe4a816ac51144b0da8edabdf0a916d390
d9b5965b99fe07a3b6bb557ebab6679482a49030
d84ec5da09fe50e7114c0ed410a59f79d2d5f334
refs/heads/main
2023-08-22T17:12:30.134611
2021-10-28T17:57:36
2021-10-28T17:57:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7594469785690308, "alphanum_fraction": 0.7686635851860046, "avg_line_length": 59.27777862548828, "blob_id": "d5ae4cde15dc4ed61e6462c7cd2f54a9b7a1d765", "content_id": "4c07bebb9704c452eead0e85cb02f145a121d0dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1117, "license_type": "no_license", "max_line_length": 194, "num_lines": 18, "path": "/Sequencial_Fibonacci.py", "repo_name": "CintiaAssis/Fiap_ADS_ano1_fase2_cap3_atividade", "src_encoding": "UTF-8", "text": "#Uma grande empresa de jogos está querendo tornar seus games mais desafiadores.\n#Por isso ela contratou você para desenvolver um algoritmo que será aplicado futuramente em diversos outros games: o algoritmo da sorte de Fibonnaci.\n#A ideia dessa empresa, é claro, é fazer com que seja mais difícil os jogadores terem sucesso nas ações que realizam nos games.\n#Por isso o seu algoritmo deverá funcionar da seguinte forma: o usuário deve digitar um valor numérico inteiro e o algoritmo deverá verificar se esse valor encontra-se na sequência de Fibonnaci.\n#Caso o número esteja na sequência, o algoritmo deve exibir a mensagem “Ação bem sucedida!” e, caso não esteja, deve exibir a mensagem “A ação falhou...”.\n\nescolha_usuario = int(input(\"Escolha um numero: \"))\nsequencial_1 = 0\nsequencial_2 = 1\nnumero_fibonacci = 0\nwhile numero_fibonacci < escolha_usuario:\n numero_fibonacci = sequencial_2 + sequencial_1\n sequencial_1 = sequencial_2\n sequencial_2 = numero_fibonacci\nif escolha_usuario == numero_fibonacci:\n print(\"Ação bem sucedida!\")\nelse:\n print(\"A ação falhou...\")\n" }, { "alpha_fraction": 0.7485970854759216, "alphanum_fraction": 0.7530864477157593, "avg_line_length": 62.57143020629883, "blob_id": "a9b38256f30ed2adcae74b1c593ede659c6f0323", "content_id": "95a731c4ef689b9f899c410fb5f18b23001dbd8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 911, "license_type": "no_license", "max_line_length": 170, "num_lines": 14, "path": "/Controle_gastos_diario.py", "repo_name": "CintiaAssis/Fiap_ADS_ano1_fase2_cap3_atividade", "src_encoding": "UTF-8", "text": "#Olhando para o mercado de educação infantil, você e sua equipe decidem criar um aplicativo onde as crianças aprendam a controlar os seus gastos.\n#Como forma de validar um protótipo, foi solicitado que você crie um script simples,\n#em que o usuário deve informar QUANTAS TRANSAÇÕES financeiras realizou ao longo de um dia e, na sequência, deve informar o VALOR DE CADA UMA das transações que realizou.\n#Seu programa deverá exibir, ao final, o valor total gasto pelo usuário e também a média do valor de cada transação.\n\nquantidade_compras = int(input('Quantas compras você fez hoje: '))\ncompra = 1\ntotal = 0\nfor x in range(quantidade_compras):\n compra = float(input('Informe o valor da compra: '))\n total = total + compra\n media = total / quantidade_compras\nprint(\"Hoje você gastou um total de R$ {:.2f}\".format(total))\nprint(\"Sua media por compra foi de R$ {:.2f}\".format(media))\n\n" }, { "alpha_fraction": 0.7696202397346497, "alphanum_fraction": 0.7734177112579346, "avg_line_length": 55.35714340209961, "blob_id": "60cf64666aedf151ab8c7181b1f1f2bafcaba09e", "content_id": "de3348d92d707b8efa5c715fe632850c0fe7093d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 805, "license_type": "no_license", "max_line_length": 118, "num_lines": 14, "path": "/Contador_de_calorias.py", 
"repo_name": "CintiaAssis/Fiap_ADS_ano1_fase2_cap3_atividade", "src_encoding": "UTF-8", "text": "#Uma das funções mais procuradas por usuários de aplicativos de saúde é o de controle de calorias ingeridas em um dia.\n#Por essa razão, você deve elaborar um algoritmo implementado em Python em que o usuário informe\n#quantos alimentos consumiu naquele dia e depois possa informar o número de calorias de cada um dos alimentos.\n#Como não estudamos listas nesse capítulo você não deve se preocupar em armazenar todas as calorias digitadas,\n#mas deve exibir o total de calorias no final.\n\n\nquantidade_alimentos = int(input(\"Qual a quantidade de alimentos que você consumiu hoje? \"))\ncalorias = 0\nsoma = 0\nfor x in range(quantidade_alimentos):\n calorias = float(input(\"Informe as calorias: \"))\n soma = soma + calorias\nprint(\"Hoje você consumiu um total de {:.2f} calorias\".format(soma))\n\n" } ]
3
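The membership test in `Sequencial_Fibonacci.py` above generalizes into a reusable function; a sketch with the same iteration scheme (the function name is my own, and the behavior matches the script, including treating 0 as a member):

```python
def is_fibonacci(n):
    # Advance the pair until the running Fibonacci value reaches or
    # passes n, exactly as the script's while loop does, then compare.
    previous, current = 0, 1
    fib = 0
    while fib < n:
        fib = previous + current
        previous, current = current, fib
    return n == fib

print(is_fibonacci(13))  # True  -> "Ação bem sucedida!"
print(is_fibonacci(14))  # False -> "A ação falhou..."
```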
nicktimko/technicolor-clippy
https://github.com/nicktimko/technicolor-clippy
2162427eb9a482333a7507fe399c236dfe51a9aa
8751b05dacd6e231f63bbfa8717fe9fdb5cc8be2
0029de0018f09067caa2784ae7a1836456323a9b
refs/heads/master
2021-01-19T12:18:18.268233
2016-10-06T03:20:48
2016-10-06T03:20:48
70,032,511
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7329931855201721, "alphanum_fraction": 0.7448979616165161, "avg_line_length": 19.275861740112305, "blob_id": "77531cd0ed90cc85c78c91e72b0078c449245be3", "content_id": "a71ccb71175c6832efea46b75e41abf1c3a8be55", "detected_licenses": [ "MIT", "PSF-2.0", "Python-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 588, "license_type": "permissive", "max_line_length": 75, "num_lines": 29, "path": "/technicolor/format.py", "repo_name": "nicktimko/technicolor-clippy", "src_encoding": "UTF-8", "text": "import sys\nimport time\n\nfrom pygments import highlight\nfrom pygments.lexers import PythonLexer\nfrom pygments.formatters import HtmlFormatter\nimport win32clipboard\n\nfrom .html_clipboard import HtmlClipboard, put_html\n\n\ndef highlight_python(snippet):\n return highlight(snippet, PythonLexer(), HtmlFormatter(noclasses=True))\n\n\ndef main():\n win32clipboard.OpenClipboard()\n snippet = win32clipboard.GetClipboardData()\n\n highlighted = highlight_python(snippet)\n\n print(highlighted)\n # put_html(highlighted)\n\n time.sleep(5)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n" }, { "alpha_fraction": 0.7860962748527527, "alphanum_fraction": 0.7976357936859131, "avg_line_length": 117.43333435058594, "blob_id": "6aab7a79200adae700617a61270ef366f731ad35", "content_id": "acbc7310bf0a51e50c725f1b811f9e321ef617ed", "detected_licenses": [ "MIT", "PSF-2.0", "Python-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3555, "license_type": "permissive", "max_line_length": 566, "num_lines": 30, "path": "/LICENSE.md", "repo_name": "nicktimko/technicolor-clippy", "src_encoding": "UTF-8", "text": "# License\n\n* Copyright © 2016 Nick Timkovich\n* *portions of the Windows clipboard code* Copyright © Phillip Piper 2006 under the terms of the PSF license\n\n## The MIT License (MIT)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\n**The Software is provided \"AS IS\" without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or the use or other dealings in the Software.**\n\n## Python Software Foundation License (Version 2)\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation (\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and otherwise using this software (\"Python\") in source or binary form and its associated documentation.\n\n2. 
Subject to the terms and conditions of this License Agreement, PSF hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use Python alone or in any derivative version, provided, however, that PSF's License Agreement and PSF's notice of copyright, i.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights Reserved\" are retained in Python alone or in any derivative version prepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on or incorporates Python or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\" basis. **PSF makes no representations or warranties, express or implied. By way of example, but not limitation, PSF makes no and disclaims any representation or warranty of merchantability or fitness for any particular purpose or that the use of Python will not infringe any third party rights.**\n\n5. **PSF shall not be liable to licensee or any other users of Python for any incidental, special, or consequential damages or loss as a result of modifying, distributing, or otherwise using Python, or any derivative thereof, even if advised of the possibility thereof.**\n\n6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between PSF and Licensee. This License Agreement does not grant permission to use PSF trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee agrees to be bound by the terms and conditions of this License Agreement.\n" }, { "alpha_fraction": 0.7436708807945251, "alphanum_fraction": 0.7768987417221069, "avg_line_length": 44.14285659790039, "blob_id": "58086f7556cea635c08e734208d7a199d6b99768", "content_id": "734b2db457b3b170768c8a6b7aaaad33ed895a34", "detected_licenses": [ "MIT", "PSF-2.0", "Python-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 632, "license_type": "permissive", "max_line_length": 152, "num_lines": 14, "path": "/README.md", "repo_name": "nicktimko/technicolor-clippy", "src_encoding": "UTF-8", "text": "# Technicolor Clippy\n\nTrying to convert code snippets to HTML (via Pygments) to be able to paste them into PowerPoint and have them look half-decent.\n\nWindows Clipboard code adapted from [Phillip Piper on ActiveState Code Recipes][winclip]\n\n## Clipboard details\n\n### Windows\n\n* [Clipboard Formats - MSDN](https://msdn.microsoft.com/en-us/library/windows/desktop/ms649013(v=vs.85).aspx)\n* [How does Copy Paste of formatted text work? 
- Stack Overflow](https://stackoverflow.com/questions/1885956/how-does-copy-paste-of-formatted-text-work)\n\n[winclip]: https://code.activestate.com/recipes/474121-getting-html-from-the-windows-clipboard/\n" }, { "alpha_fraction": 0.5660290122032166, "alphanum_fraction": 0.5743768215179443, "avg_line_length": 31.794677734375, "blob_id": "76a1839440ddeb6a1f792fe17e17f8dbc779c88c", "content_id": "067492e75d3c8c0808fc62453d9f3ae72fbdc77f", "detected_licenses": [ "MIT", "PSF-2.0", "Python-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8625, "license_type": "permissive", "max_line_length": 122, "num_lines": 263, "path": "/technicolor/html_clipboard.py", "repo_name": "nicktimko/technicolor-clippy", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, print_function\n\nimport re\nimport win32clipboard\n\n\ndef has_html():\n \"\"\"\n Return True if there is a Html fragment in the clipboard..\n \"\"\"\n cb = HtmlClipboard()\n return cb.has_html_format()\n\n\ndef get_html():\n \"\"\"\n Return the Html fragment from the clipboard or None if there is no Html in the clipboard.\n \"\"\"\n cb = HtmlClipboard()\n if cb.has_html_format():\n return cb.get_fragment()\n else:\n return None\n\n\ndef put_html(fragment):\n \"\"\"\n Put the given fragment into the clipboard.\n Convenience function to do the most common operation\n \"\"\"\n cb = HtmlClipboard()\n cb.put_fragment(fragment)\n\n\nclass HtmlClipboard(object):\n\n CF_HTML = None\n\n MARKER_BLOCK_OUTPUT = (\n \"Version:1.0\\r\\n\"\n \"StartHTML:%09d\\r\\n\"\n \"EndHTML:%09d\\r\\n\"\n \"StartFragment:%09d\\r\\n\"\n \"EndFragment:%09d\\r\\n\"\n \"StartSelection:%09d\\r\\n\"\n \"EndSelection:%09d\\r\\n\"\n \"SourceURL:%s\\r\\n\"\n )\n\n MARKER_BLOCK_EX = (\n \"Version:(\\S+)\\s+\"\n \"StartHTML:(\\d+)\\s+\"\n \"EndHTML:(\\d+)\\s+\"\n \"StartFragment:(\\d+)\\s+\"\n \"EndFragment:(\\d+)\\s+\"\n \"StartSelection:(\\d+)\\s+\"\n \"EndSelection:(\\d+)\\s+\"\n \"SourceURL:(\\S+)\"\n )\n MARKER_BLOCK_EX_RE = re.compile(MARKER_BLOCK_EX)\n\n MARKER_BLOCK = (\n \"Version:(\\S+)\\s+\"\n \"StartHTML:(\\d+)\\s+\"\n \"EndHTML:(\\d+)\\s+\"\n \"StartFragment:(\\d+)\\s+\"\n \"EndFragment:(\\d+)\\s+\"\n \"SourceURL:(\\S+)\"\n )\n MARKER_BLOCK_RE = re.compile(MARKER_BLOCK)\n\n DEFAULT_HTML_BODY = (\n \"<!DOCTYPE HTML PUBLIC \\\"-//W3C//DTD HTML 4.0 Transitional//EN\\\">\"\n \"<HTML><HEAD></HEAD><BODY><!--StartFragment-->%s<!--EndFragment--></BODY></HTML>\"\n )\n\n def __init__(self):\n self.html = None\n self.fragment = None\n self.selection = None\n self.source = None\n self.html_clipboard_version = None\n\n def get_cf_html(self):\n \"\"\"\n Return the FORMATID of the HTML format\n \"\"\"\n if self.CF_HTML is None:\n self.CF_HTML = win32clipboard.RegisterClipboardFormat(\"HTML Format\")\n\n return self.CF_HTML\n\n def get_available_formats(self):\n \"\"\"\n Return a possibly empty list of formats available on the clipboard\n \"\"\"\n formats = []\n try:\n win32clipboard.OpenClipboard(0)\n cf = win32clipboard.EnumClipboardFormats(0)\n while (cf != 0):\n formats.append(cf)\n cf = win32clipboard.EnumClipboardFormats(cf)\n finally:\n win32clipboard.CloseClipboard()\n\n return formats\n\n def has_html_format(self):\n \"\"\"\n Return a boolean indicating if the clipboard has data in HTML format\n \"\"\"\n return (self.get_cf_html() in self.get_available_formats())\n\n\n def get_from_clipboard(self):\n \"\"\"\n Read and decode the HTML from the clipboard\n \"\"\"\n try:\n win32clipboard.OpenClipboard(0)\n src = 
win32clipboard.GetClipboardData(self.get_cf_html())\n #print src\n src = src.decode('utf-16')\n self.decode_clipboard_source(src)\n finally:\n win32clipboard.CloseClipboard()\n\n def decode_clipboard_source(self, src):\n \"\"\"\n Decode the given string to figure out the details of the HTML that's on the string\n \"\"\"\n # Try the extended format first (which has an explicit selection)\n matches = self.MARKER_BLOCK_EX_RE.match(src)\n if matches:\n self.prefix = matches.group(0)\n self.html_clipboard_version = matches.group(1)\n self.html = src[int(matches.group(2)):int(matches.group(3))]\n self.fragment = src[int(matches.group(4)):int(matches.group(5))]\n self.selection = src[int(matches.group(6)):int(matches.group(7))]\n self.source = matches.group(8)\n else:\n # Failing that, try the version without a selection\n matches = self.MARKER_BLOCK_RE.match(src)\n if matches:\n self.prefix = matches.group(0)\n self.html_clipboard_version = matches.group(1)\n self.html = src[int(matches.group(2)):int(matches.group(3))]\n self.fragment = src[int(matches.group(4)):int(matches.group(5))]\n self.source = matches.group(6)\n self.selection = self.fragment\n\n def get_html(self, refresh=False):\n \"\"\"\n Return the entire Html document\n \"\"\"\n if not self.html or refresh:\n self.get_from_clipboard()\n return self.html\n\n def get_fragment(self, refresh=False):\n \"\"\"\n Return the Html fragment. A fragment is well-formated HTML enclosing the selected text\n \"\"\"\n if not self.fragment or refresh:\n self.get_from_clipboard()\n return self.fragment\n\n def get_selection(self, refresh=False):\n \"\"\"\n Return the part of the HTML that was selected. It might not be well-formed.\n \"\"\"\n if not self.selection or refresh:\n self.get_from_clipboard()\n return self.selection\n\n def get_source(self, refresh=False):\n \"\"\"\n Return the URL of the source of this HTML\n \"\"\"\n if not self.selection or refresh:\n self.get_from_clipboard()\n return self.source\n\n def put_fragment(self, fragment, selection=None, html=None, source=None):\n \"\"\"\n Put the given well-formed fragment of Html into the clipboard.\n\n selection, if given, must be a literal string within fragment.\n html, if given, must be a well-formed Html document that textually\n contains fragment and its required markers.\n \"\"\"\n if selection is None:\n selection = fragment\n if html is None:\n html = self.DEFAULT_HTML_BODY % fragment\n if source is None:\n source = \"file://HtmlClipboard.py\"\n\n fragment_start = html.index(fragment)\n fragment_end = fragment_start + len(fragment)\n selection_start = html.index(selection)\n selection_end = selection_start + len(selection)\n self.put_to_clipboard(html, fragment_start, fragment_end, selection_start, selection_end, source)\n\n def put_to_clipboard(self, html, fragment_start, fragment_end, selection_start, selection_end, source=\"None\"):\n \"\"\"\n Replace the Clipboard contents with the given html information.\n \"\"\"\n try:\n win32clipboard.OpenClipboard(0)\n win32clipboard.EmptyClipboard()\n src = self.encode_clipboard_source(html, fragment_start, fragment_end, selection_start, selection_end, source)\n #print src\n win32clipboard.SetClipboardData(self.get_cf_html(), src)\n finally:\n win32clipboard.CloseClipboard()\n\n def encode_clipboard_source(self, html, fragment_start, fragment_end, selection_start, selection_end, source):\n \"\"\"\n Join all our bits of information into a string formatted as per the HTML format specs.\n \"\"\"\n # How long is the prefix going to be?\n 
dummy_prefix = self.MARKER_BLOCK_OUTPUT % (0, 0, 0, 0, 0, 0, source)\n len_prefix = len(dummy_prefix)\n\n prefix = self.MARKER_BLOCK_OUTPUT % (\n len_prefix,\n len(html) + len_prefix,\n fragment_start + len_prefix,\n fragment_end + len_prefix,\n selection_start + len_prefix,\n selection_end + len_prefix,\n source\n )\n return (prefix + html)\n\n\ndef dump_html():\n cb = HtmlClipboard()\n print(\"GetAvailableFormats()=%s\" % str(cb.get_available_formats()))\n print(\"HasHtmlFormat()=%s\" % str(cb.has_html_format()))\n if cb.has_html_format():\n cb.get_from_clipboard()\n print(\"prefix=>>>%s<<<END\" % cb.prefix)\n print(\"htmlClipboardVersion=>>>%s<<<END\" % cb.html_clipboard_version)\n print(\"GetSelection()=>>>%s<<<END\" % cb.get_selection())\n print(\"GetFragment()=>>>%s<<<END\" % cb.get_fragment())\n print(\"GetHtml()=>>>%s<<<END\" % cb.get_html())\n print(\"GetSource()=>>>%s<<<END\" % cb.get_source())\n\n\nif __name__ == '__main__':\n def test_SimpleGetPutHtml():\n data = \"<p>Writing to the clipboard is <strong>easy</strong> with this code.</p>\"\n put_html(data)\n if get_html() == data:\n print(\"passed\")\n else:\n print(\"failed\")\n\n test_SimpleGetPutHtml()\n dump_html()\n" } ]
4
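A note on the CF_HTML encoding implemented in html_clipboard.py above: encode_clipboard_source can measure its own header by first rendering it with zeroed counters only because every offset goes through a fixed-width %09d field, so the prefix length never changes when the real values are substituted. Below is a minimal self-contained sketch of that trick; the MARKER string is copied from the record's MARKER_BLOCK_OUTPUT, while the function name and demo HTML are illustrative and not part of the repo.

MARKER = (
    "Version:1.0\r\n"
    "StartHTML:%09d\r\n"
    "EndHTML:%09d\r\n"
    "StartFragment:%09d\r\n"
    "EndFragment:%09d\r\n"
    "StartSelection:%09d\r\n"
    "EndSelection:%09d\r\n"
    "SourceURL:%s\r\n"
)

def encode_cf_html(html, frag_start, frag_end, source="file://sketch"):
    # The dummy render fixes the prefix length: %09d always occupies 9 chars.
    prefix_len = len(MARKER % (0, 0, 0, 0, 0, 0, source))
    prefix = MARKER % (
        prefix_len,                  # StartHTML: document begins after header
        prefix_len + len(html),      # EndHTML
        prefix_len + frag_start,     # StartFragment
        prefix_len + frag_end,       # EndFragment
        prefix_len + frag_start,     # StartSelection (same as fragment here)
        prefix_len + frag_end,       # EndSelection
        source,
    )
    return prefix + html

if __name__ == "__main__":
    frag = "<b>hi</b>"
    body = ("<html><body><!--StartFragment-->"
            + frag + "<!--EndFragment--></body></html>")
    start = body.index(frag)
    packet = encode_cf_html(body, start, start + len(frag))
    # The recorded StartFragment offset really lands on the fragment text.
    prefix_len = len(packet) - len(body)
    assert packet[prefix_len + start:].startswith(frag)
    print(packet)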
TheBroMoe/pramp-interview-questions
https://github.com/TheBroMoe/pramp-interview-questions
dd68707a6b1f8346e9df338deddd4e1eb0247d68
b552551adc958bf980c42504866b303d86513aca
45380b07df616b8b2a8f5ad7a9ece6a5456d7f33
refs/heads/master
2020-03-24T06:13:01.562847
2018-09-06T02:24:17
2018-09-06T02:24:17
142,520,529
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46344485878944397, "alphanum_fraction": 0.49566295742988586, "avg_line_length": 24.634920120239258, "blob_id": "f4cda1bae13452fdd41043adbee86bf30238644a", "content_id": "0991fc5ec4cf79ffff1a4f8fbc7000963435b607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1614, "license_type": "no_license", "max_line_length": 55, "num_lines": 63, "path": "/spiral_copy/spiral_copy.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\n [ [1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20] ]\n \n [1,2,3,...]\n output_array = []\n 1. traverse first list from 0 to n\n append to output_array\n stop searching \n 2. append last element of each list\n 3. append first to last\n 4. append first elemnt of each list\n 5. repeat 1. ignore last element\n \n psuedo-code:\n 1. traversing left to right (left_col, right_col)\n 2. up to down (top_row, bot_row)\n 3. right to left (right_col, left_col)\n 4. down to up (bot_row, top_row)\n \n O(n) time\n O(n) space\n \n R = numbers of rows\n C = # cols\n \n \n'''\n\ndef spiral_copy(inputMatrix):\n total_col = len(inputMatrix[0]) \n total_row = len(inputMatrix) \n left_col = 0\n right_col = total_col\n top_row = 0 \n bot_row = total_row\n \n output = []\n while top_row <= bot_row and left_col <= right_col:\n for i in range(left_col, right_col):\n output.append(inputMatrix[top_row][i])\n \n top_row += 1\n\n for j in range(top_row, bot_row):\n output.append(inputMatrix[j][right_col])\n \n right_col -= 1\n \n if top_row <= bot_row:\n for i in range(right_col, left_col):\n output.append(inputMatrix[bot_row][i])\n bot_row -= 1\n \n if left_col <= right_col:\n for j in range(bot_row , top_row):\n output.append(inputMatrix[j][left_col])\n\n left_col += 1\n \n return output" }, { "alpha_fraction": 0.5343642830848694, "alphanum_fraction": 0.5532646179199219, "avg_line_length": 14.506667137145996, "blob_id": "45334e73a669d0757ae623b11232c38fcef4b2c3", "content_id": "6b32ec1c5923b75cfb3caf0c991b82f3c84c8ee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1168, "license_type": "no_license", "max_line_length": 50, "num_lines": 75, "path": "/Bracket Match/bracket_match.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\n\ngiven: uneven str of brackets\nwant: counter to balance str\nleft_count, right_count = 0, 0\n\"(()\"\n\n\"(((\" .........\n\")))()\"\n unblanced_brackets = 0\n left : 0\n right : 0\n if right:\n right += 1\n if left:\n left += 1\n \n if right > left:\n unbalanced_brackets += right - left\n right -= right - left\n \n if left > right:\n\n\")()\" : 2\n\nleft_count : 2\nright_count : 1\noutput: 1\n\nif left_count == right_count return 0\n\n“())(”\nleft_count : 2\nright_count : 2\n\nright > left :\n \nfor char in text:\n if char == \"(\"\n left_count += 1\n elif char == \")\"\n right_count += 1\n'''\n# \ndef bracket_match(text):\n \n if not text:\n return 0\n \n left_count = 0\n right_count = 0\n unbalanced_bracket = 0\n \n for char in text:\n if char == '(':\n left_count += 1\n \n elif char == ')':\n right_count += 1\n \n if right_count > left_count:\n diff = right_count - left_count\n unbalanced_bracket += diff\n \n right_count -= diff\n \n if left_count > right_count:\n unbalanced_bracket += left_count - right_count\n \n return unbalanced_bracket\n\nif __name__ == \"__main__\":\n \n print(bracket_match(\"(()\"))\n 
print(bracket_match(\"())(\")) " }, { "alpha_fraction": 0.5571428537368774, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 16.5, "blob_id": "7562105ca073c717c50fc4c4cca8aa7bfd13c5ad", "content_id": "2c12456bf83186dfe2821ee19864d3c7dfb130d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1050, "license_type": "no_license", "max_line_length": 81, "num_lines": 60, "path": "/Merging 2 Packages/get_indices_of_item_wights.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "\n'''\nGiven: Array, limit\nWant: [index01, index02]; Array[index01] + Array[index02] == limit\nwhere index01 > index02\n\nIdeal Sol:\nN = len(arr)\nO(N) time\nO(1) \n\nlim -= arr[0] = 17\nif 17 in arr\nreturn \n\ndiff = limit - arr[i]\narr_ind = {}\narr_ind[arr[i]] = i\n\nif diff in arr_ind:\n\n\nex: input: arr = [4, 6, 10, 15, 16], lim = 21\n for element in arr\n store element and index in dict\n \n for element in arr\n diff = limit - element\n \n if arr_ind[diff] == Null\n continue\n otherwise\n return [arr_ind[arr[i]], arr_ind[arr[j]]] where i > j\n \n return []\n Watch out for hashing an item weight before looking up in the map its complement\n\n'''\n\ndef get_indices_of_item_wights(arr, limit):\n arr_ind = {}\n \n for ind, num in enumerate(arr):\n \n diff = limit - num\n complement_index = arr_ind.get(diff, -1)\n if complement_index != -1:\n return [ind, complement_index]\n\n else:\n arr_ind[num] = ind\n\n \n \n \n return []\n\narr = [4, 6, 10, 15, 16]\nlimit = 21\n\nprint(get_indices_of_item_wights(arr, limit))" }, { "alpha_fraction": 0.5808202624320984, "alphanum_fraction": 0.6248492002487183, "avg_line_length": 22.338027954101562, "blob_id": "b154ddbb4c78c4c157b4d9424c40273cce37c250", "content_id": "dd7095406258fdef10a191377bed2462c6974882", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1658, "license_type": "no_license", "max_line_length": 121, "num_lines": 71, "path": "/Decrypt Message/Decrypt_Message.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\nTIPS FOR SELF:\n - When evaluating your own example: try to take shortcuts to save time\n\nGiven: word = \"flgxswdliefy\"\nwant: \"encyclopedia\"\n\nstep 1: Decrypt the first letter\n - Convert to ASCII value; \n f: 102\n - Subtract 1; \n 102 - 1 = 101\n \n - Move value to be in range of a - z (97 - 122) ASCII values by adding 26\n 101 within range 97-122\n \n - Convert value back to a character\n 101: e\n \nstep 2: Given the decrypted previous letter 'prev' and it's value after the second step of encryption, 'second_step_prev'\n - Convert current letter to ascii value\n l: 108\n \n - Subtract value with second_step_prev\n 108 - 102 = 6\n \n - Add multiples of 26 to be in range of a-z ASCII value (97-122)\n 6 + 4 * 26 = 110\n \n - 3 parts\n * Convert result back to character:\n 110 : n\n \n * Store ASCII value in prev\n prev = 110\n \n * Add ASCII value to second_step_prev for decryption of next letter\n second_step_prev += prev \n\nstep 3: Append to decrypyted and repeat step 2 for next character until entire string is decrypted\n\nAlgorithm: \nenc[n] = dec[n] + second_step[n - 1] + 26 * m\ndec[n] = enc[n] - second_step[n - 1] - 26 * m\n\n\n\n'''\n'''\nCharacter -> ASCII:\nnumber = ord(char)\n\nAscii -> Character:\nchar = chr(number)\n''' \n\ndef decrypt(word):\n second_step = 1\n decrypt_word = \"\"\n\n for index in range(len(word)):\n new_letter_ascii = 
ord(word[index])\n new_letter_ascii -= second_step\n\n while new_letter_ascii < ord('a'):\n new_letter_ascii += 26\n\n decrypt_word += chr(new_letter_ascii)\n second_step += new_letter_ascii\n\n return decrypt_word\n " }, { "alpha_fraction": 0.4909188151359558, "alphanum_fraction": 0.5133547186851501, "avg_line_length": 22.708860397338867, "blob_id": "d17f152affc79292bc6e78312e46b038e4299692", "content_id": "dc26660a047bcfee00640e623b8f859239e4eb3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1872, "license_type": "no_license", "max_line_length": 69, "num_lines": 79, "path": "/Island Count/search.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\n \n [[0, 1, 0, 1, 0],\n [1, 0, 1, 1, 1],\n [1, 0, 0, 1, 0],\n [0, 1, 1, 0, 0],\n [1, 0, 1, 0, 1] ]\n R = total rows\n C = totals cols\n R * C\n total_islands = 0\n row_cur = 0\n col_cur = 0\n \n visited[(row, col)] = True\n \n if hit unvisited 1:\n mark as visited\n check adjacent 1's\n \n increment total_islands\n otherwise:\n continue\n \n\n'''\n\ndef check(row, col, num_rows, num_cols, visited, queue):\n\n # Row check (not reading outside of bounds)\n # Column check (not reading outside of bounds)\n # Checking if you already visited:\n \n if row < num_rows and col < num_cols and col >= 0 and row >= 0:\n if visited[(row, col)] == False:\n queue.append([row, col])\n return queue\n \n \n \ndef search(row, col, num_rows, num_cols, visited):\n queue = [(row, col)]\n while queue:\n curr_node = queue.remove((row,col))\n \n if visited[curr_node] == False:\n visited[curr_node] = True\n row = curr_node[0]\n col = curr_node[1]\n queue = check(row + 1, col, num_rows, num_cols, visited, queue)\n queue = check(row - 1, col, num_rows, num_cols, visited, queue)\n queue = check(row, col + 1, num_rows, num_cols, visited, queue)\n queue = check(row, col - 1, num_rows, num_cols, visited, queue)\n\n \n \n \n \n \ndef get_number_of_islands(binaryMatrix):\n R = len(binaryMatrix)\n C = len(binaryMatrix[0]) \n total_islands = 0\n\n visited = {}\n \n for i in range(R):\n for j in range(C):\n visited[(i, j)] = False\n \n \n \n for row in range(R):\n for col in range(C):\n if binaryMatrix[row][col] == 1 and visited[(row,col)] == False:\n search(row, col, R, C, visited)\n total_islands += 1\n \n return total_islands" }, { "alpha_fraction": 0.3838678300380707, "alphanum_fraction": 0.3974732756614685, "avg_line_length": 18.245283126831055, "blob_id": "80ac4d62fdd8076dcd5871d974ce7982a21d0e1d", "content_id": "e6395bd37ef0257513214e1eb921aa529e9064a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 74, "num_lines": 53, "path": "/Sentence Reverse/reverse_words.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\n\narr = [ 'p', 'e', 'r', 'f', 'e', 'c', 't', ' ', #perfect\n 'm', 'a', 'k', 'e', 's', ' ',# makes\n 'p', 'r', 'a', 'c', 't', 'i', 'c', 'e' ] # practice\n\n \n [ 'p', 'r', 'a', 'c', 't', 'i', 'c', 'e', ' ',\n 'm', 'a', 'k', 'e', 's', ' ',\n 'p', 'e', 'r', 'f', 'e', 'c', 't' ]\n \n method 1: save each sererate word into a list\n append char by char into the new array\n \n arr[0:6] --> output[n-7:n-1]\n \n'''\n\n \n''' while start_ptr < n\n \n if arr[start_ptr] == \" \":\n output[end_ptr] \n \n counter += 1\n start_ptr += 1\n end_ptr -= 1\n '''\n \ndef reverse_words(arr):\n n = len(arr)\n \n output = [' '] * n\n 
\n    size = 0\n    \n    start_ptr = 0\n    \n    end_ptr = n\n\n    # loop through array\n    while start_ptr < n:\n        if arr[start_ptr] == \" \":\n            output[end_ptr:(end_ptr + size)] = arr[(start_ptr - size):start_ptr]\n            size = 0\n        else: \n            size += 1\n        \n        start_ptr += 1 \n        end_ptr -= 1\n    \n    output[:size] = arr[n-size:]\n    return output\n    \n    " }, { "alpha_fraction": 0.46244344115257263, "alphanum_fraction": 0.5158371329307556, "avg_line_length": 17.131147384643555, "blob_id": "c68598ce5bb9c0f3bb96242c4207f69d0e42c6ba", "content_id": "e946dae3d44ca83a6c41fa9c2ca7d65e9a6bbe1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1105, "license_type": "no_license", "max_line_length": 71, "num_lines": 61, "path": "/Array of Array Products/array_of_array_products.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\n    Given: [8, 10, 2]\n    result: [10* 2, 8*2, 8*10]\n    \ninput: arr = [2, 7, 3, 4]\noutput: [84, 24, 56, 42] # by calculating: [7*3*4, 2*3*4, 2*7*4, 2*7*3]\n    \n    a,b,c,d,e\n    \n    c = a*b * d*e\n    d = (a*b*c) * e\n    \n    \n    \nstart -> end ; end -> start\n\nproduct = 1\n ->\nleft_array = [1,8,80]\nright_array = [1,2,20]\nright_array =[20,2,1]\n <-\n    \nfor i = 0; i < len(arr); i++{\n    product *= arr[i]\n    left_array.append(product)\n    }\n\nfor i = len(arr); i > 0; i++{\n    product *= arr[i]\n    right_array.append(product)\n    }\n    \n    final_array = []\nfor i = 0; i < len(arr); i++\n    final_array[i] = left_array[i] * right_array[len(arr)-i]\nstart: \nproduct\n'''\ndef array_of_array_products(arr):\n    # Initialize product\n    product = 1\n    # Initialize arrays\n    start_array = []\n    \n    n = len(arr)\n    \n    if n == 0 or n == 1:\n        return []\n    \n    # Append intermediate products to start_array\n    for i in range(n):\n        start_array.append(product)\n        product *= arr[i]\n    \n    product = 1\n    for i in range(n - 1, -1, -1):\n        start_array[i] *= product\n        product *= arr[i]\n    \n    return start_array" }, { "alpha_fraction": 0.5363128781318665, "alphanum_fraction": 0.5602554082870483, "avg_line_length": 21.321428298950195, "blob_id": "cc36dcb618bce85fae5c5c214b6d915fd2b4c1e6", "content_id": "76437e7c521c3029a85dabfc5bd0c2c5cac8f0b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1253, "license_type": "no_license", "max_line_length": 63, "num_lines": 56, "path": "/get_cheapest_cost/get_cheapest_cost.py", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "'''\nnode -> cost\nnode -> node \n    find least cost\n    \n    (0).children = [5, 3, 6]\n    (5).parent = 0\n'''\ndef get_cheapest_cost(rootNode):\n    children = rootNode.children # :(\n\n    if children == []: \n        return rootNode.cost\n    \n    else:\n        min_cost = float('inf')\n        for child in children:\n            cost = get_cheapest_cost(child)\n            if cost < min_cost:\n                min_cost = cost\n        \n        return min_cost + rootNode.cost\n\n\n########################################## \n# Use the helper code below to implement #\n# and test your function above           #\n##########################################\n\n# A node \nclass Node:\n\n    # Constructor to create a new node\n    def __init__(self, cost):\n        self.cost = cost\n        self.children = []\n        self.parent = None\n\n\ndef setup_example():\n    root = Node(0)\n    \n    root.children = [Node(5), Node(3), Node(6)]\n    root.children[0].children = [Node(4)]\n    root.children[1].children = [Node(2), Node(0)]\n    root.children[1].children[0].children = [Node(1)]\n    root.children[1].children[0].children[0].children = [Node(1)]\n    
root.children[1].children[1].children = [ Node(10)]\n root.children[2].children = [Node(1), Node(5)]\n \n return root\n\nif __name__ == \"__main__\":\n root = setup_example()\n \n print(get_cheapest_cost(root) == 7)\n " }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 50, "blob_id": "6978b59f77cd367192d1954e3cd908114bf9d511", "content_id": "c6d46d9b576f272a8e5fcbdbf4b494130c443fc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 102, "license_type": "no_license", "max_line_length": 73, "num_lines": 2, "path": "/README.md", "repo_name": "TheBroMoe/pramp-interview-questions", "src_encoding": "UTF-8", "text": "# pramp-interview-questions\nInterview questions from pramp mock interviews done by myself or another\n" } ]
9
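One correction worth recording for the repo above: spiral_copy raises IndexError as stored, because right_col and bot_row are initialized one past the last valid index, and its right-to-left and bottom-to-top passes use forward range() calls that never execute. The sketch below is my reconstruction of the intended inclusive-bounds traversal, not the repo's code; the test matrix comes from the file's own docstring.

def spiral_copy(matrix):
    # Inclusive bounds for the four edges of the unvisited sub-matrix.
    if not matrix or not matrix[0]:
        return []
    top, bot = 0, len(matrix) - 1
    left, right = 0, len(matrix[0]) - 1
    out = []
    while top <= bot and left <= right:
        for c in range(left, right + 1):            # left -> right along top row
            out.append(matrix[top][c])
        top += 1
        for r in range(top, bot + 1):               # top -> bottom along right col
            out.append(matrix[r][right])
        right -= 1
        if top <= bot:
            for c in range(right, left - 1, -1):    # right -> left along bottom row
                out.append(matrix[bot][c])
            bot -= 1
        if left <= right:
            for r in range(bot, top - 1, -1):       # bottom -> top along left col
                out.append(matrix[r][left])
            left += 1
    return out

if __name__ == "__main__":
    grid = [[1, 2, 3, 4, 5],
            [6, 7, 8, 9, 10],
            [11, 12, 13, 14, 15],
            [16, 17, 18, 19, 20]]
    assert spiral_copy(grid) == [1, 2, 3, 4, 5, 10, 15, 20, 19, 18,
                                 17, 16, 11, 6, 7, 8, 9, 14, 13, 12]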
sphinx-contrib/imageembed
https://github.com/sphinx-contrib/imageembed
40d06b61d2127fa97e71b7e63dd328a2136dcde7
cf890a5c31239122736e4f47af7e0237c7d1f94c
d51e711c6bcf5f847e027cd7f3a7b6cb94430bf2
refs/heads/master
2022-09-28T19:32:50.346656
2022-09-12T08:10:40
2022-09-16T17:34:36
146,723,234
0
1
BSD-2-Clause
2018-08-30T08:56:55
2022-09-16T17:34:41
2022-09-17T06:57:34
Python
[ { "alpha_fraction": 0.5145630836486816, "alphanum_fraction": 0.5145630836486816, "avg_line_length": 19.600000381469727, "blob_id": "08dafaee2b27e588e64efd922c59cb4e799e551e", "content_id": "d20d6cb84c31d0ee09a54906c775f3ed942febf0", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 103, "license_type": "permissive", "max_line_length": 29, "num_lines": 5, "path": "/tests/roots/test-sphinxcontrib-imageembed/index.rst", "repo_name": "sphinx-contrib/imageembed", "src_encoding": "UTF-8", "text": "test-sphinxcontrib-imageembed\n=============================\n\n.. image:: bolt.png\n.. image:: svgimg.svg\n" }, { "alpha_fraction": 0.6504629850387573, "alphanum_fraction": 0.6770833134651184, "avg_line_length": 15, "blob_id": "00b62bd032262c906613013b02449f0f41b8b95d", "content_id": "4f54037ca91561d2b06492b9f0f15585bee5da08", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 864, "license_type": "permissive", "max_line_length": 42, "num_lines": 54, "path": "/tox.ini", "repo_name": "sphinx-contrib/imageembed", "src_encoding": "UTF-8", "text": "[tox]\nminversion = 2.0\nenvlist = py{27,34,35,36,py},style\n\n[testenv]\ndeps = -r{toxinidir}/test-requirements.txt\ncommands=\n pytest\n\n[testenv:mypy]\ndescription =\n Run type checks.\ndeps =\n mypy\ncommands=\n mypy sphinxcontrib\n\n[testenv:style]\ndescription =\n Run style checks.\ndeps =\n flake8\n isort\n yapf\ncommands =\n isort -rc -c -df sphinxcontrib tests\n yapf -rd sphinxcontrib tests\n flake8 sphinxcontrib tests setup.py\n\n[testenv:build]\ndescription =\n Generate build\nskip_install = true\ndeps =\n wheel\n setuptools\ncommands =\n python setup.py -q sdist bdist_wheel\n\n[testenv:release]\ndescription =\n Publish release\nskip_install = true\ndeps =\n {[testenv:build]deps}\n twine >= 1.5.0\ncommands =\n {[testenv:build]commands}\n twine upload --skip-existing dist/*\n\n[travis]\npython =\n 2.7: py27, style\n 3.6: py36, mypy\n" }, { "alpha_fraction": 0.4545454680919647, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 10, "blob_id": "0ab557d1f0748a9577c6abcf34081d1023986454", "content_id": "20be8f40df9789cb38263b8b32a3e2f3c7265b30", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 33, "license_type": "permissive", "max_line_length": 16, "num_lines": 3, "path": "/test-requirements.txt", "repo_name": "sphinx-contrib/imageembed", "src_encoding": "UTF-8", "text": "pbr\npytest>=3.0,<4.0\nsphinx>=1.7\n" }, { "alpha_fraction": 0.5942173600196838, "alphanum_fraction": 0.6031904220581055, "avg_line_length": 27.253520965576172, "blob_id": "dd1a5e8b71556ab8678d8377324936bfa2955bd3", "content_id": "9f82debcb94ca84dbf53f2ca4515c11fde7a66e2", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2006, "license_type": "permissive", "max_line_length": 77, "num_lines": 71, "path": "/sphinxcontrib/imageembed.py", "repo_name": "sphinx-contrib/imageembed", "src_encoding": "UTF-8", "text": "\"\"\"\n sphinxcontrib.imageembed\n ~~~~~~~~~~~~~~~~~~~~~~~~\n\n Embed images directly into generated html docs.\n\n :copyright: Copyright 2018 by Jan Gutter <[email protected]>\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport base64\nimport os\n\nfrom six import text_type\nfrom sphinx.transforms.post_transforms.images import ImageConverter\nfrom sphinx.util import 
logging\nfrom sphinx.util.images import guess_mimetype\n\ntry:\n import pbr.version\n __version__ = pbr.version.VersionInfo('imageembed').version_string()\nexcept ImportError:\n __version__ = '0.0.0'\n\nif False:\n # For type annotations\n from docutils import nodes # noqa\n from typing import Any, Dict # noqa\n from sphinx.application import Sphinx # noqa\n\nlogger = logging.getLogger(__name__)\n\n\ndef _convert_to_data_uri(filename):\n # type: (str) -> str\n encoded = base64.b64encode(open(filename, \"rb\").read())\n mimetype = guess_mimetype(filename, default='*')\n data_uri = 'data:{};base64,{}'.format(mimetype, encoded)\n return data_uri\n\n\nclass ImageEmbedder(ImageConverter):\n default_priority = 200\n\n def match(self, node):\n # type: (nodes.Node) -> bool\n if self.app.builder.supported_image_types == []:\n return False\n else:\n return self.app.builder.supported_data_uri_images\n\n def handle(self, node):\n # type: (nodes.Node) -> None\n try:\n node['alt'] = node['uri']\n basename = os.path.basename(node['uri'])\n path = os.path.join(self.app.srcdir, basename)\n node['uri'] = _convert_to_data_uri(path)\n except Exception as exc:\n logger.error('Could not embed image: %s [%s]' % (node['alt'],\n text_type(exc)))\n\n\ndef setup(app):\n # type: (Sphinx) -> Dict[unicode, Any]\n app.add_post_transform(ImageEmbedder)\n return {\n 'version': __version__,\n 'parallel_read_safe': True,\n 'parallel_write_safe': True\n }\n" }, { "alpha_fraction": 0.6359060406684875, "alphanum_fraction": 0.6442952752113342, "avg_line_length": 26.090909957885742, "blob_id": "fb63a65fe2b0027364cca8ecc050474b2545fec5", "content_id": "70516c0fd5b58dde8cf737e885ddf0b44d3c6bfd", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 596, "license_type": "permissive", "max_line_length": 70, "num_lines": 22, "path": "/tests/test_imageembed.py", "repo_name": "sphinx-contrib/imageembed", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n test_imageembed\n ~~~~~~~~~~~~~~~\n\n Test sphinxcontrib.imageembed extension.\n\n :copyright: Copyright 2018 by Jan Gutter <[email protected]>\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\n\[email protected]('singlehtml', testroot='sphinxcontrib-imageembed')\ndef test_sphinxcontrib_imageembed(app, status, warning):\n app.build()\n\n # content = (app.outdir / 'Python.html').text()\n # assert '\\\\sphinxincludegraphics{{svgimg}.png}' in content\n # assert not (app.outdir / 'svgimg.svg').exists()\n # assert (app.outdir / 'svgimg.png').exists()\n" }, { "alpha_fraction": 0.6710280179977417, "alphanum_fraction": 0.6710280179977417, "avg_line_length": 25.75, "blob_id": "50bd26bfa46b579275c484cbe02dea95d9e80a7e", "content_id": "79dee1410f8b4d75781f445573023ada3101acd5", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 535, "license_type": "permissive", "max_line_length": 76, "num_lines": 20, "path": "/README.rst", "repo_name": "sphinx-contrib/imageembed", "src_encoding": "UTF-8", "text": "========================\nsphinxcontrib.imageembed\n========================\n\n.. 
image:: https://travis-ci.org/sphinx-contrib/imageembed.svg?branch=master\n :target: https://travis-ci.org/sphinx-contrib/imageembed\n\nEmbed images directly into generated html docs.\n\nOverview\n--------\n\nBy adding ``extensions = ['sphinxcontrib.imageembed']`` to ``conf.py``, html\nimages will be embedded as data_uri src tags.\n\nLinks\n-----\n\n- Source: https://github.com/sphinx-contrib/imageembed\n- Bugs: https://github.com/sphinx-contrib/imageembed/issues\n" } ]
6
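The data-URI conversion at the heart of imageembed.py above can be exercised outside Sphinx. One caveat: under Python 3, base64.b64encode returns bytes, so formatting the raw result straight into the URI string, as the record does, would embed a literal b'...' prefix; the sketch below decodes to ASCII first. It also substitutes the standard-library mimetypes module for sphinx.util.images.guess_mimetype, and the file name is the test image referenced by the record's index.rst.

import base64
import mimetypes

def to_data_uri(filename):
    # Read raw bytes, base64-encode, and decode to str so the URI stays ASCII.
    with open(filename, "rb") as f:
        payload = base64.b64encode(f.read()).decode("ascii")
    mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
    return "data:{};base64,{}".format(mimetype, payload)

if __name__ == "__main__":
    print(to_data_uri("bolt.png")[:72], "...")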
ahmdrz/facedetector
https://github.com/ahmdrz/facedetector
0c80dab825baa2205765b77fc103ef538ca28e0b
817677a40f28058f7fce07e3b060f178284faedc
e3f843013d45f7a00e3455e547433597ced9f9ea
refs/heads/master
2021-04-06T00:19:48.262913
2019-02-20T09:41:40
2019-02-20T09:41:40
124,878,200
13
9
null
null
null
null
null
[ { "alpha_fraction": 0.7011494040489197, "alphanum_fraction": 0.7413793206214905, "avg_line_length": 28.16666603088379, "blob_id": "15a0e361d9c98bd52d3a987a10a75f35c6507add", "content_id": "2acffe37c76cc89f229a683bd82566d389178c18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 174, "license_type": "permissive", "max_line_length": 74, "num_lines": 6, "path": "/face_detection/data/models.sh", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nwget \"http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\"\nwget \"http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2\"\n\nbzip2 -d *.bz2" }, { "alpha_fraction": 0.5433422923088074, "alphanum_fraction": 0.5478105545043945, "avg_line_length": 26.292682647705078, "blob_id": "5d7c76cdbb0c5da43e16ebf77adc526ce398d3a3", "content_id": "2fbcbe5eca4aa22ebb109dcee4b3ee206bc3bf90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1119, "license_type": "permissive", "max_line_length": 83, "num_lines": 41, "path": "/face_detection/face_detector.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "import dlib\nimport face_landmarks\nimport face_descriptor\nimport gender_detection\n\n_detector = dlib.get_frontal_face_detector()\n\n\ndef detect(image, landmarks=False, gender=False, min_score=0.2):\n image_h, image_w = image.shape[:2]\n\n boxes, scores, idx = _detector.run(image, 1, -1)\n\n output = []\n for i, d in enumerate(boxes):\n if scores[i] < min_score:\n continue\n\n x, y, w, h = d.left(), d.top(), d.width(), d.height()\n x = x / float(image_w)\n y = y / float(image_h)\n w = w / float(image_w)\n h = h / float(image_h)\n face = {\n \"box\": [x, y, w, h],\n \"score\": scores[i],\n \"index\": idx[i],\n }\n if landmarks or gender:\n shape = face_landmarks.face_shape(image, d)\n\n if landmarks:\n face[\"landmarks\"] = face_landmarks.landmarks(image, shape=shape)\n\n if gender:\n encoding = face_descriptor.describe(image, shape)\n face[\"gender\"] = gender_detection.predict_gender(encoding=encoding)\n\n output.append(face)\n\n return output\n" }, { "alpha_fraction": 0.647773265838623, "alphanum_fraction": 0.647773265838623, "avg_line_length": 20.478260040283203, "blob_id": "12bfcb1596512a1a29f8b11fb3814fbdbb9583e0", "content_id": "051a3380e5c51effa1a4a7e514a524756deb4e4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": "permissive", "max_line_length": 44, "num_lines": 23, "path": "/face_detection/image_reader.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "from skimage import io\nfrom imutils import resize, url_to_image\n\n\ndef read_image(path, width=None):\n image = io.imread(path)\n if width:\n image = resize(image, width=width)\n return image\n\n\ndef read_url(url, width=None):\n image = url_to_image(url)\n if width:\n image = resize(image, width=width)\n return image\n\n\ndef read_string(str, width=None):\n image = io.imread(str, plugin='imageio')\n if width:\n image = resize(image, width=width)\n return image\n" }, { "alpha_fraction": 0.516087532043457, "alphanum_fraction": 0.5585585832595825, "avg_line_length": 33.53333282470703, "blob_id": "8ebfeb5172da6bcba2cf7483ced0f00af58364c7", "content_id": "79bc037159939e3e4326bc3412f2877c07647fd0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1554, "license_type": "permissive", "max_line_length": 95, "num_lines": 45, "path": "/example.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "import cv2\nfrom imutils.video import WebcamVideoStream\nfrom imutils import resize\nimport requests\n\n\ndef detect(img, url='http://localhost:5000/detect?landmarks=on&gender=on'):\n _, img_encoded = cv2.imencode('.jpg', img)\n resp = requests.post(url, data=img_encoded.tostring())\n return resp.json()\n\n\ncamera = WebcamVideoStream(src=0).start()\n\nwhile True:\n image = camera.read()\n image = resize(image, width=400)\n\n image_h, image_w = image.shape[:2]\n\n result = detect(image)\n if result[\"status\"] == \"ok\":\n for face in result[\"result\"]:\n x, y, w, h = face[\"box\"]\n x, y, w, h = int(x * image_w), int(y * image_h), int(w * image_w), int(h * image_h)\n\n landmarks = face[\"landmarks\"]\n for point in landmarks:\n point_x = int(point[0] * image_w)\n point_y = int(point[1] * image_h)\n cv2.circle(image, (point_x, point_y), 1, (200, 200, 100), -1)\n\n color = (0, 0, 255) if face[\"gender\"] == \"male\" else (0, 255, 0)\n text = \"gender: {}\".format(face[\"gender\"])\n text_width, text_height = cv2.getTextSize(text, cv2.FONT_HERSHEY_PLAIN, 0.75, 1)[0]\n cv2.rectangle(image, (x, y - text_height - 5), (x + text_width, y), color, -1)\n cv2.putText(image, text, (x, y - 3), cv2.FONT_HERSHEY_PLAIN, 0.75, (255, 255, 255))\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 1)\n\n cv2.imshow(\"image\", image)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('q'):\n break\n\ncamera.stop()\n" }, { "alpha_fraction": 0.7945205569267273, "alphanum_fraction": 0.835616409778595, "avg_line_length": 9.571428298950195, "blob_id": "7030298719f2b0ffc8b151f7e6440dd17ff3c046", "content_id": "29ec71b62c5b1cd5fb0dfd12908de5ea72f836ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 73, "license_type": "permissive", "max_line_length": 21, "num_lines": 7, "path": "/requirements.txt", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "dlib==19.7\nopencv-contrib-python\nscikit-image\nimutils\nflask\nnumpy\nimageio" }, { "alpha_fraction": 0.6237270832061768, "alphanum_fraction": 0.6354379057884216, "avg_line_length": 26.661972045898438, "blob_id": "674f102e5713384a3cfa3d62c8ec05cd1324db21", "content_id": "8489110d5295e08bab9b636b71cfb7398d93e547", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1964, "license_type": "permissive", "max_line_length": 100, "num_lines": 71, "path": "/server.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "from face_detection import face_detector, image_reader\nimport time\nimport os\nimport shutil\nimport tempfile\nimport logging\nfrom flask import jsonify\nfrom flask import request\nfrom flask import Flask\nimport dlib\n\nprint \"dlib version: {}\".format(dlib.__version__)\n\n\napp = Flask(__name__)\n\ntemporary_directory = tempfile.mkdtemp()\n\n\[email protected](400)\ndef bad_request(e):\n return jsonify({\"status\": \"not ok\", \"message\": \"this server could not understand your request\"})\n\n\[email protected](404)\ndef not_found(e):\n return jsonify({\"status\": \"not found\", \"message\": \"route not found\"})\n\n\[email protected](500)\ndef not_found(e):\n return jsonify({\"status\": \"internal error\", \"message\": \"internal error occurred in server\"})\n\n\[email protected]('/detect', methods=['GET', 
'POST'])\ndef detect_human_faces():\n if request.method == 'POST':\n if 'image' in request.files:\n f = request.files['image']\n unix_time = int(time.time())\n path = os.path.join(temporary_directory,\n '{}_{}'.format(unix_time, f.filename))\n f.save(path)\n\n image = image_reader.read_image(path, width=400)\n os.remove(path)\n elif request.data:\n image = image_reader.read_string(request.data)\n else:\n image_url = request.args.get('url')\n image = image_reader.read_url(image_url, width=400)\n\n landmarks = request.args.get('landmarks') == 'on'\n gender = request.args.get('gender') == 'on'\n\n faces = face_detector.detect(image, landmarks=landmarks, gender=gender)\n\n return jsonify({\"status\": \"ok\", \"result\": faces})\n\n\nif __name__ == \"__main__\":\n log = logging.getLogger('werkzeug')\n log.setLevel(logging.ERROR)\n\n print \"Starting server on http://localhost:5000\"\n print \"Serving ...\",\n app.run(host='0.0.0.0')\n print \"Finished !\"\n print \"Removing temporary directory ...\",\n shutil.rmtree(temporary_directory)\n print \"Done !\"\n" }, { "alpha_fraction": 0.5788139700889587, "alphanum_fraction": 0.6279776692390442, "avg_line_length": 20.45652198791504, "blob_id": "102a4c795d3ef2435f7cfd7faec39d4c963fd821", "content_id": "dd82deaa619b10d7e35d71a2efad2721aa2a4296", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1973, "license_type": "permissive", "max_line_length": 156, "num_lines": 92, "path": "/README.md", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "# Face Detector\nA Simple Flask and docker-ready application that works as a server to detect faces, genders and their landmarks.\n\n### Docker\n\n```bash\n$ docker pull ahmdrz/facedetector:latest\n$ docker run -p 5000:5000 ahmdrz/facedetector:latest\n```\n\n#### Sample\n\n**Request**\n\n```bash\n$ curl \"localhost:5000/detect?gender=on&landmarks=on&url=https://pixel.nymag.com/imgs/daily/vulture/2018/09/04/04-eminem-2.w700.h700.jpg\n```\n\n**Response**\n\n```json\n{\n \"result\": [\n {\n \"box\": [\n 0.3375,\n 0.115,\n 0.2275,\n 0.2275\n ],\n \"gender\": \"male\",\n \"index\": 0,\n \"landmarks\": [\n [\n 0.36,\n 0.1875\n ],\n [\n 0.3625,\n 0.21\n ],\n [\n 0.365,\n 0.2325\n ] \n ],\n \"score\": 0.43\n }\n ],\n \"status\": \"ok\"\n}\n```\n\n**NOTE** Landmarks are 68 points.\n\n### Dependencies\n\nFirst of all , clone this repository using `git clone https://github.com/ahmdrz/facedetector`.\n\nTo install all of dependencies run `sudo pip install -r requirements.txt` , after installing python packages , you have to download pre-trained dlib models.\n\n```\n cd face_detection/data\n ./models.sh\n```\n\n### How to use in clients ?\n\nSee `example.py` for details.\n\nThis small application has only one route , `/detect`.\n\nOptional query parameters is `landmarks` and `gender`. Use `landmarks=on` to detect 68 point of face landmarks. Use `gender=on` to predict face gender.\n\nYou can use `url` query parameter.\n\nCurl examples :\n\n```bash\n$ curl \"localhost:5000/detect?url=<picture url>\"\n$ curl -F \"image=@<picture file path>\" \"localhost:5000/detect?landmarks=on\"\n$ curl -F \"image=@<picture file path>\" \"localhost:5000/detect?gender=on\"\n```\n\n**Note**: All of the cordinates in this application is based of `image width` and `image height`.\n\n#### Keywords\n\n0. Face/Gender/Landmarks dectection\n1. Docker face detection\n2. Docker face gender detector\n3. 
Docker face landmarks" }, { "alpha_fraction": 0.6813380122184753, "alphanum_fraction": 0.6919013857841492, "avg_line_length": 27.399999618530273, "blob_id": "f1eceb933a3ec6420e48d027f25fdf2f35d80ed3", "content_id": "af433674f058333b39cec2d5c3e0b71e0366e878", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 568, "license_type": "permissive", "max_line_length": 120, "num_lines": 20, "path": "/face_detection/face_landmarks.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "import os\nimport dlib\n\n_predictor_file = os.path.join(os.path.join(os.path.dirname(__file__), \"data\"), \"shape_predictor_68_face_landmarks.dat\")\n_predictor_points = 68\n\nif not os.path.exists(_predictor_file):\n print \"Shape predictor file not found.\"\n exit(1)\n\n_predictor = dlib.shape_predictor(_predictor_file)\n\n\ndef landmarks(image, shape):\n image_h, image_w = image.shape[:2]\n return [(shape.part(i).x / float(image_w), shape.part(i).y / float(image_h)) for i in range(_predictor_points)]\n\n\ndef face_shape(image, rect):\n return _predictor(image, rect)\n" }, { "alpha_fraction": 0.679358720779419, "alphanum_fraction": 0.6833667159080505, "avg_line_length": 23.950000762939453, "blob_id": "282c7e33f507e6b513becf95fc66c074240cbaff", "content_id": "32eef38cda5b6292c7b9030390408c331892f51d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 499, "license_type": "permissive", "max_line_length": 100, "num_lines": 20, "path": "/face_detection/gender_detection.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "import dlib\nimport pickle\nimport os\n\n_classifier = pickle.load(\n open(os.path.join(os.path.join(os.path.dirname(__file__), \"data\"), \"gender_model.pickle\"), 'r'))\n\n\n# For train your own gender model visit:\n# https://github.com/mrl-athomelab/ros-face-recognition#gender-detection\n\ndef predict_gender(encoding, threshold=0.5):\n result = _classifier(dlib.vector(encoding))\n if result > threshold:\n return \"male\"\n\n if result < -threshold:\n return \"female\"\n\n return \"unknown\"\n" }, { "alpha_fraction": 0.7132353186607361, "alphanum_fraction": 0.720588207244873, "avg_line_length": 29.22222137451172, "blob_id": "5cd78425401eefac83efcae8b3b11be852b65fdd", "content_id": "b082c149b076d967b78ca36bff5b7e08299bb4e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 272, "license_type": "permissive", "max_line_length": 111, "num_lines": 9, "path": "/face_detection/face_descriptor.py", "repo_name": "ahmdrz/facedetector", "src_encoding": "UTF-8", "text": "import dlib\nimport os\n\n_face_model = dlib.face_recognition_model_v1(\n os.path.join(os.path.join(os.path.dirname(__file__), \"data\"), \"dlib_face_recognition_resnet_model_v1.dat\"))\n\n\ndef describe(image, shape):\n return _face_model.compute_face_descriptor(image, shape)\n" } ]
10
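The facedetector README above documents the /detect endpoint with optional url, landmarks=on, and gender=on query parameters, and notes that all returned coordinates are fractions of image width and height. A minimal client sketch along those lines; the image URL and host are illustrative, and error handling beyond raise_for_status is omitted.

import requests

def detect_by_url(image_url, host="http://localhost:5000"):
    # Mirrors: curl "localhost:5000/detect?gender=on&landmarks=on&url=<picture url>"
    resp = requests.get(host + "/detect",
                        params={"url": image_url, "gender": "on", "landmarks": "on"})
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    result = detect_by_url("https://example.com/face.jpg")  # illustrative URL
    if result["status"] == "ok":
        for face in result["result"]:
            # box holds [x, y, w, h] as fractions of image width/height.
            print("gender:", face.get("gender"),
                  "score:", face["score"], "box:", face["box"])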
yeahmatte/hello-service
https://github.com/yeahmatte/hello-service
62b9962062d42bc3b1081f7a6baad5dadf28013c
5525524a6d106a6b4eede18a651cc4f2e8d101a2
8c847f43f3901e66ffdb1769d0434e9eb72280d0
refs/heads/master
2020-12-20T18:33:25.899498
2020-04-30T12:11:42
2020-04-30T12:11:42
236,171,633
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.623836100101471, "alphanum_fraction": 0.6350092887878418, "avg_line_length": 18.88888931274414, "blob_id": "569afc29f481530665c215e9585b581c1c417a0b", "content_id": "af56eaa5509464a9d53d316535568a0ce18c96f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 537, "license_type": "permissive", "max_line_length": 89, "num_lines": 27, "path": "/app/app.py", "repo_name": "yeahmatte/hello-service", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask import request\nimport json\n\n#Flask-Cognito\n\napp = Flask(__name__)\n\n#@cogauth.identity_handler\n#def lookup_cognito_user(payload):\n# \"\"\"Look up user in our database from Cognito JWT payload.\"\"\"\n# return User.query.filter(User.cognito_username == payload['username']).one_or_none()\n\[email protected]('/')\ndef hello():\n return \"{}\".format(request.headers)\n\[email protected]('/hello/json')\ndef helloJSON():\n res = {}\n res['Hello'] = \"json\"\n\n return res\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=80)\n" }, { "alpha_fraction": 0.654285728931427, "alphanum_fraction": 0.668571412563324, "avg_line_length": 25.923076629638672, "blob_id": "6512f7881ee5b479ed94016567ac3c5cf96d3116", "content_id": "5178af356be779d7ad71b9db08d6b4f81f65dd1c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 350, "license_type": "permissive", "max_line_length": 59, "num_lines": 13, "path": "/Dockerfile", "repo_name": "yeahmatte/hello-service", "src_encoding": "UTF-8", "text": "FROM alpine:latest\n\nRUN apk add --no-cache python3 && \\\n python3 -m ensurepip && \\\n rm -r /usr/lib/python*/ensurepip && \\\n pip3 install --upgrade pip setuptools && \\\n rm -r /root/.cache\nCOPY ./install-tools/requirements.txt /tmp/requirements.txt\nRUN pip3 install -qr /tmp/requirements.txt\n\nENTRYPOINT [ \"python3\" ]\n\nCMD [ \"/app/app.py\" ]\n" }, { "alpha_fraction": 0.5275229215621948, "alphanum_fraction": 0.5642201900482178, "avg_line_length": 14.571428298950195, "blob_id": "fb67a784ee65ef7a05a04f8b6bf0ec99a4f8c113", "content_id": "6e82c09d8f1a4c7343469789b370d1a191a55122", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "YAML", "length_bytes": 218, "license_type": "permissive", "max_line_length": 33, "num_lines": 14, "path": "/docker-compose.yml", "repo_name": "yeahmatte/hello-service", "src_encoding": "UTF-8", "text": "version: \"3.3\"\n\nservices:\n\n hello:\n build:\n context: .\n dockerfile: ./Dockerfile\n container_name: hello_service\n ports:\n - 5002:80\n volumes:\n - \"./app:/app\"\n restart: unless-stopped\n" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7241379022598267, "avg_line_length": 13.5, "blob_id": "fae39326e1458e059240b7e91bf35213adf21777", "content_id": "61585c212997d959dc65d1da33f004b3c3285a3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 58, "license_type": "permissive", "max_line_length": 25, "num_lines": 4, "path": "/start.sh", "repo_name": "yeahmatte/hello-service", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n#docker-compose pull\nsudo docker-compose up -d\n" } ]
4
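A quick smoke test for the two routes in the hello-service app above, assuming the docker-compose port mapping (host 5002 to container 80) is in effect; the requests dependency is my addition, not part of the repo.

import requests

BASE = "http://localhost:5002"

print(requests.get(BASE + "/").text)              # "/" echoes the request headers
print(requests.get(BASE + "/hello/json").json())  # expects {'Hello': 'json'}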
siddeshbist/4dn4-InternetCommunication
https://github.com/siddeshbist/4dn4-InternetCommunication
af9123b8fdfcd18befe04285d7396068850bfda8
71ac49c09d2e8d21c18028e552777a4fbeb9e7f7
a629a3794cf9311dd6689c62ac9962254ff6bee6
refs/heads/master
2023-04-06T02:15:44.819658
2021-04-27T15:46:34
2021-04-27T15:46:34
362,164,847
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5099969506263733, "alphanum_fraction": 0.5145767331123352, "avg_line_length": 36.26496887207031, "blob_id": "94137277e5b6524278a0b7a352108c27b3abb6e6", "content_id": "62c58f7faeea5579e897121b1cd6e540f70c9ebf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29259, "license_type": "no_license", "max_line_length": 101, "num_lines": 785, "path": "/lab3/lab3_FTP.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n########################################################################\n#\n# GET File Transfer\n#\n# When the client connects to the server, it immediately sends a\n# 1-byte GET command followed by the requested filename. The server\n# checks for the GET and then transmits the file. The file transfer\n# from the server is prepended by an 8 byte file size field. These\n# formats are shown below.\n#\n# The server needs to have REMOTE_FILE_NAME defined as a text file\n# that the client can request. The client will store the downloaded\n# file using the filename LOCAL_FILE_NAME. This is so that you can run\n# a server and client from the same directory without overwriting\n# files.\n#\n########################################################################\n\nimport socket\nimport argparse\nimport threading\nimport sys\nimport time\nimport datetime\nimport os\n\n########################################################################\n\n# Define all of the packet protocol field lengths. See the\n# corresponding packet formats below.\nCMD_FIELD_LEN = 1 # 1 byte commands sent from the client.\nFILE_SIZE_FIELD_LEN = 8 # 8 byte file size field.\n\n# Packet format when a GET command is sent from a client, asking for a\n# file download:\n\n# -------------------------------------------\n# | 1 byte GET command | ... file name ... |\n# -------------------------------------------\n\n# When a GET command is received by the server, it reads the file name\n# then replies with the following response:\n\n# -----------------------------------\n# | 8 byte file size | ... file ... |\n# -----------------------------------\n\n# Define a dictionary of commands. The actual command field value must\n# be a 1-byte integer. 
For now, we only define the \"GET\" command,\n# which tells the server to send a file.\n\nCMD = { \n \"get\" : 1,\n \"put\" : 2,\n \"list\": 3,\n \"bye\": 4\n }\n\nMSG_ENCODING = \"utf-8\"\n \n########################################################################\n# SERVER\n########################################################################\n\nclass Server:\n\n HOSTNAME = \"127.0.0.1\"\n ALL_IF_HOSTNAME = \"0.0.0.0\"\n\n SDP_PORT = 30000 # file sharing service discovery port \n PORT = 50000 # FSP\n RECV_SIZE = 1024\n BACKLOG = 5\n\n FILE_NOT_FOUND_MSG = \"Error: Requested file is not available!\"\n\n # This is the file that the client will request using a GET.\n # REMOTE_FILE_NAMES = ['remotefile.txt']\n # REMOTE_FILE_NAMES = os.listdir('remotefiles') # gets array of files in RFILE directory\n REMOTE_FILE_NAMES = os.listdir('C:/Users/sahaj/Desktop/lab3-dn files/remotefiles')\n # REMOTE_FILE_PATH = os.getcwd() +'\\\\remotefiles\\\\' # path to files \n REMOTE_FILE_PATH = 'C:/Users/sahaj/Desktop/lab3-dn files/remotefiles/'\n\n MSG = \"SID-ABI-SAHAJ's File Sharing Service\"\n MSG_ENCODED = MSG.encode(MSG_ENCODING)\n\n\n def __init__(self):\n self.thread_list = []\n self.create_listen_socket()\n self.process_connections_forever()\n # tcp_thread = threading.Thread(target=self.process_connections_forever)\n # print(\"Starting serving thread: \", tcp_thread.name)\n # self.thread_list.append(tcp_thread)\n # tcp_thread.daemon = True\n # tcp_thread.start()\n # print(\"-\" * 72)\n\n # Service discovery\n def service_announcement(self):\n self.create_socket_sd()\n self.receive_forever_sd()\n\n def create_socket_sd(self):\n try:\n # Create an IPv4 UDP socket.\n self.socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Get socket layer socket options.\n self.socket_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to socket address, i.e., IP address and port.\n self.socket_udp.bind( (Server.ALL_IF_HOSTNAME, Server.SDP_PORT) )\n except Exception as msg:\n print(msg)\n sys.exit(1)\n \n def receive_forever_sd(self):\n while True:\n try: \n print(Server.MSG, \"listening on SDP port {} ...\".format(Server.SDP_PORT))\n recvd_bytes, address = self.socket_udp.recvfrom(Server.RECV_SIZE)\n\n print(\"Received: \", recvd_bytes.decode('utf-8'), \" Address:\", address)\n \n # Decode the received bytes back into strings.\n recvd_str = recvd_bytes.decode(MSG_ENCODING)\n\n # Check if the received packet contains a service scan\n # command.\n # if Server.SCAN_CMD in recvd_str:\n # # Send the service advertisement message back to\n # # the client.\n self.socket_udp.sendto(Server.MSG_ENCODED, address)\n except KeyboardInterrupt:\n print()\n sys.exit(1)\n\n def printFiles(self):\n print('Files stored in remote directory: ')\n for files in Server.REMOTE_FILE_NAMES:\n print(files)\n\n def create_listen_socket(self):\n try:\n # forever running UDP socket for service annoucement\n # self.service_announcement()\n # print('Listening on service discovery messages on SDP port {}'.format(Server.SDP_PORT))\n sdp_thread = threading.Thread(target=self.service_announcement)\n print(\"\\nStarting serving thread: \", sdp_thread.name)\n self.thread_list.append(sdp_thread)\n sdp_thread.daemon = True\n sdp_thread.start()\n print(\"-\" * 72)\n\n\n # Create the TCP server listen socket in the usual way.\n \n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind((Server.HOSTNAME, Server.PORT))\n # 
tcp_thread = thread.Thread(target=self.socket.listen(Server.BACKLOG))\n self.socket.listen(Server.BACKLOG)\n print(\"Listening for file sharing connections on port {} ...\".format(Server.PORT))\n print('-'*72)\n\n except Exception as msg:\n print(msg)\n exit()\n\n def process_connections_forever(self):\n try:\n # sdp_thread = threading.Thread(target=self.service_announcement())\n # self.thread_list.append(sdp_thread)\n # sdp_thread.start()\n while True: \n\n # self.connection_handler(self.socket.accept())\n new_client = self.socket.accept()\n\n # new client connected. Create new thread to process\n new_thread = threading.Thread(target=self.connection_handler,\n args=(new_client,))\n \n # Record the new thread.\n self.thread_list.append(new_thread)\n\n # Start the new thread running.\n print(\"Starting serving thread: \", new_thread.name)\n new_thread.daemon = True\n new_thread.start()\n \n except Exception as msg:\n print(msg)\n except KeyboardInterrupt:\n print()\n finally:\n print(\"Closing server socket...\")\n self.socket.close()\n sys.exit(1)\n\n def connection_handler(self, client):\n connection, address = client\n print(\"-\" * 72)\n print(\"Connection received from {}.\".format(address))\n\n\n while True:\n\n # recvd_bytes = connection.recv(Server.RECV_SIZE)\n\n # Read the command and see if it is a GET.\n cmd = int.from_bytes(connection.recv(CMD_FIELD_LEN), byteorder='big')\n print(cmd)\n\n if (cmd not in CMD.values()):\n # print(\"Closing {} client connection ... \".format(address))\n # connection.close()\n print('Incorrect command received!')\n # Break will exit the connection_handler and cause the\n # thread to finish.clear\n break\n\n\n if(cmd == CMD['bye']):\n print('Closing {} client connection ...'.format(address))\n print('-'*72)\n connection.close()\n break\n\n\n\n # if cmd != CMD[\"GET\"]:\n # print(\"GET command not received!\")\n # return\n\n\n elif (cmd == CMD['list']):\n # recvd_bytes = connection.recv(Server.RECV_SIZE)\n # recvd_str = recvd_bytes.decode(MSG_ENCODING)\n remoteFiles = Server.REMOTE_FILE_NAMES\n print(remoteFiles)\n remoteFiles_str = ''\n for rfile in remoteFiles:\n remoteFiles_str += rfile + ' '\n remoteFiles_bytes = remoteFiles_str.encode(MSG_ENCODING)\n\n # list_bytes = [remoteFile.encode(MSG_ENCODING) for remoteFile in remoteFiles]\n # list_size_bytes = len(list_bytes)\n # list_size_field = list_size_bytes.to_bytes(FILE_SIZE_FIELD_LEN, byteorder='big')\n\n # Create the packet to be sent with the header field.\n # pkt = file_size_field + file_bytes\n # print('Sending RLIST directory to client...')\n try:\n # Send the packet to the connected client.\n # for rfile in list_bytes:\n # connection.sendall(rfile)\n connection.sendall(remoteFiles_bytes)\n # print(\"Sent packet bytes: \\n\", pkt)\n print(\"Sending FSD to client ... 
\")\n except socket.error:\n # If the client has closed the connection, close the\n # socket on this end.\n print(\"Closing client connection ...\")\n connection.close()\n return\n\n elif(cmd == CMD['put']):\n\n # filename_bytes = connection.recv(Server.RECV_SIZE)\n filename_size_bytes = connection.recv(FILE_SIZE_FIELD_LEN)\n filename_size = int.from_bytes(filename_size_bytes,byteorder='big')\n filename_bytes = connection.recv(filename_size)\n # filename_bytes = connection.recv(FILE_SIZE_FIELD_LEN)\n filename_str = filename_bytes.decode(MSG_ENCODING)\n print('filename size: ', filename_size)\n print('filename: {} received'.format(filename_str))\n \n file_type = filename_str.split('.')[1]\n \n\n # Read the file size field.\n file_size_bytes = connection.recv(FILE_SIZE_FIELD_LEN)\n if len(file_size_bytes) == 0:\n connection.close()\n return\n\n # Make sure that you interpret it in host byte order.\n file_size = int.from_bytes(file_size_bytes, byteorder='big')\n print('file size:', file_size)\n\n\n # Receive the file itself.\n recvd_bytes_total = bytearray()\n try:\n # Keep doing recv until the entire file is downloaded. \n while len(recvd_bytes_total) < file_size:\n recvd_bytes_total += connection.recv(Server.RECV_SIZE)\n\n # Create a file using the received filename and store the\n # data.\n print(\"Received {} bytes. Creating file: {}\" \\\n .format(len(recvd_bytes_total), filename_str)) \n \n new_file_path = Server.REMOTE_FILE_PATH + filename_str\n # print(new_file_path)\n\n if (file_type == 'txt'):\n with open(new_file_path, 'w') as f:\n f.write(recvd_bytes_total.decode(MSG_ENCODING))\n else:\n with open(new_file_path, 'wb') as f:\n f.write(recvd_bytes_total)\n print('File successfully uploaded!\\n')\n # uploaded = 'complete'\n except KeyboardInterrupt:\n print()\n exit(1)\n # If the socket has been closed by the server, break out\n # and close it on this end.\n except socket.error:\n print('SOCKET ERROR')\n connection.close()\n\n\n elif (cmd == CMD['get']):\n # The command is good. 
Now read and decode the requested\n # filename.\n filename_bytes = connection.recv(Server.RECV_SIZE)\n filename_str = filename_bytes.decode(MSG_ENCODING)\n print(filename_str)\n file_path = Server.REMOTE_FILE_PATH + filename_str\n # print(file_path)\n\n\n # Open the requested file and get set to send it to the\n # client.\n file_type = filename_str.split('.')[1]\n print(file_type)\n print(file_path)\n try:\n if(file_type == 'txt'):\n file = open(file_path, 'r').read()\n file_bytes = file.encode(MSG_ENCODING)\n else:\n file = open(file_path,'rb').read()\n file_bytes = file\n except FileNotFoundError:\n print(Server.FILE_NOT_FOUND_MSG)\n connection.close() \n return\n\n # Encode the file contents into bytes, record its size and\n # generate the file size field used for transmission.\n # file_bytes = file.encode(MSG_ENCODING)\n file_size_bytes = len(file_bytes)\n file_size_field = file_size_bytes.to_bytes(FILE_SIZE_FIELD_LEN, byteorder='big')\n\n # Create the packet to be sent with the header field.\n pkt = file_size_field + file_bytes\n \n try:\n # Send the packet to the connected client.\n connection.sendall(pkt)\n # print(\"Sent packet bytes: \\n\", pkt)\n print(\"Sending file: \", filename_str)\n except socket.error:\n # If the client has closed the connection, close the\n # socket on this end.\n print(\"Closing client connection ...\")\n connection.close()\n return\n\n########################################################################\n# CLIENT\n########################################################################\n\nclass Client:\n\n RECV_SIZE = 10\n RECV_SIZE_SD = 1024\n # Define the local file name where the downloaded file will be\n # saved.\n\n # LOCAL_FILE_NAMES = os.listdir('localfiles') # array of all files in localfiles\n # LOCAL_FILE_PATH = os.getcwd() +'\\\\localfiles\\\\'\n\n LOCAL_FILE_NAMES = os.listdir('C:/Users/sahaj/Desktop/lab3-dn files/localfiles')\n # REMOTE_FILE_PATH = os.getcwd() +'\\\\remotefiles\\\\' # path to files \n LOCAL_FILE_PATH = 'C:/Users/sahaj/Desktop/lab3-dn files/localfiles/'\n\n # Service discovery \n BROADCAST_ADDRESS = \"255.255.255.255\"\n BROADCAST_PORT = 30000\n BROADCAST_ADDRESS_PORT = (BROADCAST_ADDRESS,BROADCAST_PORT)\n SCAN_CYCLES = 3\n SCAN_TIMEOUT = 3\n\n\n def __init__(self):\n # self.printLocalFiles()\n # self.get_console_input()\n self.send_console_input_forever()\n self.connected = 0\n # self.get_socket()\n # self.connect_to_server()\n # self.get_file()\n\n def tcp_connection(self):\n while self.connected ==1:\n self.get_socket()\n self.connect_to_server()\n self.send_console_input_forever()\n\n\n def get_socket(self):\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except Exception as msg:\n print(msg)\n exit()\n\n def connect_to_server(self):\n try:\n self.socket.connect((Server.HOSTNAME, Server.PORT))\n except Exception as msg:\n print(msg)\n exit()\n\n def socket_recv_size(self, length):\n bytes = self.socket.recv(length)\n if len(bytes) < length:\n self.socket.close()\n exit()\n return(bytes)\n \n def get_console_input(self):\n # In this version we keep prompting the user until a non-blank\n # line is entered.\n while True:\n print('\\n'+'*'*50)\n self.input_text = input(\"--(scan, connect, llist, rlist, put, get)--\\n Enter Command: \")\n try:\n \n if (self.input_text == \"scan\"):\n self.service_discovery()\n \n elif (self.input_text.split()[0] == \"get\"):\n if(len(self.input_text.split())<2):\n print('ENTER FORMAT: get <filename>')\n break\n\n fetch_text = self.input_text.split()[1] \n if 
(fetch_text not in Server.REMOTE_FILE_NAMES):\n print('File not in remote FSD!')\n break\n self.get_file(fetch_text)\n\n elif(self.input_text.split()[0] == \"connect\"):\n\n # if(len(self.input_text.split())<3):\n # print('ENTER FORMAT: connect <IPADDR> <PORT>')\n # break\n # elif (self.input_text.split()[1] != Server.HOSTNAME):\n # print('INVALID IP ADDRESS ENTERED')\n # break\n # elif (int(self.input_text.split()[2]) != Server.PORT):\n # print('INVALID PORT ENTERED')\n # break\n self.connected = 1\n self.tcp_connection()\n \n elif (self.input_text.split()[0] == \"put\"):\n if(len(self.input_text.split())<2):\n print('ENTER FORMAT: put <filename>')\n break\n\n upload_text = self.input_text.split()[1] \n if (upload_text not in Client.LOCAL_FILE_NAMES):\n print('File not in local directory!')\n break\n self.put_file(upload_text)\n \n elif(self.input_text == \"llist\"):\n self.printLocalFiles()\n\n elif(self.input_text == \"rlist\"):\n self.remote_list()\n \n elif(self.input_text =='bye'):\n self.bye()\n \n elif(self.input_text not in CMD.keys()):\n print('COMMAND NOT RECOGNIZED')\n break\n\n if self.input_text != \"\":\n break\n\n except IndexError: # no command entered\n break\n\n def bye(self):\n # convert bye in commands to bytes and send to server\n bye_field = CMD[\"bye\"].to_bytes(CMD_FIELD_LEN, byteorder='big')\n self.socket.sendall(bye_field)\n\n print('CLOSING CONNECTION')\n self.connected = 0\n # close client socket\n self.socket.close()\n sys.exit(1)\n \n\n\n # SERVICE DISCOVERY\n def service_discovery(self):\n self.get_socket_udp()\n self.scan_for_service()\n\n def get_socket_udp(self):\n try:\n # Service discovery done using UDP packets.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Arrange to send a broadcast service discovery packet.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n # Set the socket for a socket.timeout if a scanning recv\n # fails.\n self.socket.settimeout(Client.SCAN_TIMEOUT);\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def scan_for_service(self):\n # Collect our scan results in a list.\n scan_results = []\n SCAN_CMD = \"SCAN\"\n SCAN_CMD_ENCODED = SCAN_CMD.encode(MSG_ENCODING)\n # Repeat the scan procedure a preset number of times.\n for i in range(Client.SCAN_CYCLES):\n\n # Send a service discovery broadcast.\n print(\"Sending broadcast scan {}\".format(i)) \n self.socket.sendto(SCAN_CMD_ENCODED, Client.BROADCAST_ADDRESS_PORT)\n \n while True:\n # Listen for service responses. So long as we keep\n # receiving responses, keep going. 
Timeout if none are\n # received and terminate the listening for this scan\n # cycle.\n try:\n recvd_bytes, address = self.socket.recvfrom(Client.RECV_SIZE_SD)\n recvd_msg = recvd_bytes.decode(MSG_ENCODING)\n\n # Record only unique services that are found.\n if (recvd_msg, address) not in scan_results:\n scan_results.append((recvd_msg, address))\n continue\n # If we timeout listening for a new response, we are\n # finished.\n except socket.timeout:\n break\n\n # Output all of our scan results, if any.\n if scan_results:\n for result in scan_results:\n print(result)\n else:\n print(\"No services found.\")\n\n\n \n def send_console_input_forever(self):\n while True:\n try:\n self.get_console_input()\n # self.connection_send()\n # self.connection_receive()\n except (KeyboardInterrupt, EOFError):\n print()\n print(\"Closing server connection ...\")\n self.socket.close()\n sys.exit(1)\n\n def connection_send(self):\n try:\n # Send string objects over the connection. The string must\n # be encoded into bytes objects first.\n self.socket.sendall(self.input_text.encode(Server.MSG_ENCODING))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def connection_receive(self):\n try:\n # Receive and print out text. The received bytes objects\n # must be decoded into string objects.\n recvd_bytes = self.socket.recv(Client.RECV_BUFFER_SIZE)\n\n # recv will block if nothing is available. If we receive\n # zero bytes, the connection has been closed from the\n # other end. In that case, close the connection on this\n # end and exit.\n if len(recvd_bytes) == 0:\n print(\"Closing server connection ... \")\n self.socket.close()\n sys.exit(1)\n\n print(\"Received: \", recvd_bytes.decode(Server.MSG_ENCODING))\n\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def printLocalFiles(self):\n for files in Client.LOCAL_FILE_NAMES:\n print(files)\n\n def remote_list(self):\n \n # Create packet LIST field \n rlist_field = CMD[\"list\"].to_bytes(CMD_FIELD_LEN, byteorder='big')\n # print(rlist_field)\n \n # send packet to server \n self.socket.sendall(rlist_field)\n\n try:\n recvd_bytes = self.socket.recv(Client.RECV_SIZE_SD)\n\n # if len(recvd_bytes) == 0:\n # print('Closing server connection ...')\n # self.socket.close()\n # sys.exit(1)\n # if len(file_size_bytes) == 0:\n # self.socket.close()\n # return\n \n print(\"FILES STORED IN FSD: \", recvd_bytes.decode(MSG_ENCODING))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n \n def get_file(self, filename):\n\n # Create the packet GET field.\n get_field = CMD[\"get\"].to_bytes(CMD_FIELD_LEN, byteorder='big')\n\n # Create the packet filename field.\n # filename_field = Server.REMOTE_FILE_NAME.encode(MSG_ENCODING)\n filename_field = filename.encode(MSG_ENCODING)\n\n # Create the packet.\n pkt = get_field + filename_field\n\n # Send the request packet to the server.\n self.socket.sendall(pkt)\n\n file_type = filename.split('.')[1]\n\n # Read the file size field.\n file_size_bytes = self.socket_recv_size(FILE_SIZE_FIELD_LEN)\n if len(file_size_bytes) == 0:\n self.socket.close()\n return\n\n # Make sure that you interpret it in host byte order.\n file_size = int.from_bytes(file_size_bytes, byteorder='big')\n print(file_size)\n\n # Receive the file itself. Initalize byte object\n recvd_bytes_total = bytearray()\n try:\n # Keep doing recv until the entire file is downloaded. 
\n while len(recvd_bytes_total) < file_size:\n recvd_bytes_total += self.socket.recv(Client.RECV_SIZE)\n\n # Create a file using the received filename and store the\n # data.\n print(\"Received {} bytes. Creating file: {}\" \\\n .format(len(recvd_bytes_total), filename)) #Client.LOCAL_FILE_NAME))\n\n # with open(Client.LOCAL_FILE_NAME, 'w') as f:\n # f.write(recvd_bytes_total.decode(MSG_ENCODING))\n \n new_file_path = Client.LOCAL_FILE_PATH + filename\n print(new_file_path)\n if (file_type == 'txt'):\n with open(new_file_path, 'w') as f:\n f.write(recvd_bytes_total.decode(MSG_ENCODING))\n else:\n with open(new_file_path, 'wb') as f:\n f.write(recvd_bytes_total)\n except KeyboardInterrupt:\n print()\n exit(1)\n # If the socket has been closed by the server, break out\n # and close it on this end.\n except socket.error:\n self.socket.close()\n \n def put_file(self, filename):\n \n # The command is good. Now read and decode the requested\n # filename.\n put_field = CMD[\"put\"].to_bytes(CMD_FIELD_LEN, byteorder='big')\n # encode filename\n filename_field = filename.encode(MSG_ENCODING)\n filename_size = len(filename_field)\n filename_size_field = filename_size.to_bytes(FILE_SIZE_FIELD_LEN,byteorder='big')\n\n # packet to send with put, size of filename, filename\n pkt = put_field + filename_size_field + filename_field\n # 3 + 13 + localfile.txt\n # self.socket.sendall(pkt) \n\n # path of file in localfiles to be sent to FSD \n file_path = Client.LOCAL_FILE_PATH + filename\n file_type = filename.split('.')[1]\n\n # open requested file and read contents \n try: \n if (file_type == 'txt'):\n file = open(file_path, 'r').read()\n file_bytes = file.encode(MSG_ENCODING)\n else:\n file = open(file_path,'rb').read()\n file_bytes = file\n\n except FileNotFoundError:\n print('LOCAL FILE NOT FOUND')\n return\n \n # size of file to bytes - 8 in len\n # file_bytes = file.encode(MSG_ENCODING)\n file_size_bytes = len(file_bytes)\n file_size_field = file_size_bytes.to_bytes(FILE_SIZE_FIELD_LEN, byteorder='big')\n \n send_total_bytes = bytearray()\n print('file_size_fiekd: ', len(file_size_field))\n # print('file_bytes: ', file_bytes)\n \n\n # pkt2 = put_field + file_size_field + file_bytes\n pkt2 = pkt + file_size_field + file_bytes\n\n # put_command + 13 + localfile.txt + 20 + 'this is a localfile'\n try:\n # Send the packet to the connected client.\n self.socket.sendall(pkt2)\n # print(\"Sent packet bytes: \\n\", pkt)\n print(\"Sending file: \", filename)\n # if keyboard interrupt --> cancel file upload\n except KeyboardInterrupt:\n print('FILE WAS NOT UPLOADED - CONNECTION CLOSE')\n connection.close()\n except socket.error:\n # If the client has closed the connection, close the\n # socket on this end.\n print(\"Closing client connection ...\")\n connection.close()\n return\n \n########################################################################\n\nif __name__ == '__main__':\n roles = {'client': Client,'server': Server}\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role',\n choices=roles, \n help='server or client role',\n required=True, type=str)\n\n args = parser.parse_args()\n roles[args.role]()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.48558998107910156, "alphanum_fraction": 0.49592170119285583, "avg_line_length": 33.97142791748047, "blob_id": "b26c245c740ced195d5b911d9edead51a09db0bd", "content_id": "d0e8715fc2aa6413b4623766c94d79a7ac531af8", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 3678, "license_type": "no_license", "max_line_length": 81, "num_lines": 105, "path": "/lab3/templates/service_discovery_cycles.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\nimport time\nimport datetime\n\n########################################################################\n# Service Discovery\n#\n# In this version, the client broadcasts service discovery packets and\n# receives server responses. After a broadcast, the client continues\n# to receive responses until a socket timeout occurs, indicating that\n# no more responses are available. This scan process is repeated a\n# fixed number of times. The discovered services are then output.\n# \n########################################################################\n\n########################################################################\n# Service Discovery Client\n########################################################################\n\nclass Client:\n\n RECV_SIZE = 1024\n MSG_ENCODING = \"utf-8\" \n\n BROADCAST_ADDRESS = \"255.255.255.255\"\n # BROADCAST_ADDRESS = \"192.168.1.255\" \n SERVICE_PORT = 30000\n ADDRESS_PORT = (BROADCAST_ADDRESS, SERVICE_PORT)\n\n SCAN_CYCLES = 3\n SCAN_TIMEOUT = 5\n\n SCAN_CMD = \"SCAN\"\n SCAN_CMD_ENCODED = SCAN_CMD.encode(MSG_ENCODING)\n\n def __init__(self):\n self.get_socket()\n self.scan_for_service()\n\n def get_socket(self):\n try:\n # Service discovery done using UDP packets.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Arrange to send a broadcast service discovery packet.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n # Set the socket for a socket.timeout if a scanning recv\n # fails.\n self.socket.settimeout(Client.SCAN_TIMEOUT);\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def scan_for_service(self):\n # Collect our scan results in a list.\n scan_results = []\n\n # Repeat the scan procedure a preset number of times.\n for i in range(Client.SCAN_CYCLES):\n\n # Send a service discovery broadcast.\n print(\"Sending broadcast scan {}\".format(i)) \n self.socket.sendto(Client.SCAN_CMD_ENCODED, Client.ADDRESS_PORT)\n \n while True:\n # Listen for service responses. So long as we keep\n # receiving responses, keep going. 
Timeout if none are\n # received and terminate the listening for this scan\n # cycle.\n try:\n recvd_bytes, address = self.socket.recvfrom(Client.RECV_SIZE)\n recvd_msg = recvd_bytes.decode(Client.MSG_ENCODING)\n\n # Record only unique services that are found.\n if (recvd_msg, address) not in scan_results:\n scan_results.append((recvd_msg, address))\n continue\n # If we timeout listening for a new response, we are\n # finished.\n except socket.timeout:\n break\n\n # Output all of our scan results, if any.\n if scan_results:\n for result in scan_results:\n print(result)\n else:\n print(\"No services found.\")\n \n########################################################################\n# Fire up a client if run directly.\n########################################################################\n\nif __name__ == '__main__':\n Client()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5087341666221619, "alphanum_fraction": 0.513593316078186, "avg_line_length": 32.929019927978516, "blob_id": "9cb126d54bc1b948496915bca9aaa9040ef281db", "content_id": "ec62712d73ee91a96ee79d084f40dae0ac67e2d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16258, "license_type": "no_license", "max_line_length": 120, "num_lines": 479, "path": "/lab4/MulticastSenderReceiverConfig.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\nimport time\nimport struct\nimport threading\nimport json\n#from pynput import keyboard\n# global end_t\n# end_t = 0\n\n\n\n########################################################################\n\n# Read in the config.py file to set various addresses and ports.\n#from config import *\nCMD_FIELD_LEN = 1\nFILE_SIZE_FIELD_LEN = 8\n\nCMD = {\n \"getdir\":1,\n \"makeroom\":2,\n \"deleteroom\":3,\n \"bye\":4,\n}\n\n\n########################################################################\n# Broadcast Server class\n########################################################################\n\nclass Sender:\n\n # HOSTNAME = socket.gethostbyname('')\n HOSTNAME = 'localhost'\n CDP_PORT = 50000\n BACKLOG = 5\n\n TIMEOUT = 2\n RECV_SIZE = 256\n \n MSG_ENCODING = \"utf-8\"\n # MESSAGE = HOSTNAME + \"multicast beacon: \"\n MESSAGE = \"Hello from \" \n MESSAGE_ENCODED = MESSAGE.encode('utf-8')\n\n # TTL = 1 # Hops\n # TTL_SIZE = 1 # Bytes\n # TTL_BYTE = TTL.to_bytes(TTL_SIZE, byteorder='big')\n # OR: TTL_BYTE = struct.pack('B', TTL)\n\n #chatroomDict = {}\n\n def __init__(self):\n self.connected = 0\n self.chatroomDict = {}\n self.thread_list = []\n self.create_listen_socket()\n self.process_connections_forever()\n #self.send_messages_forever()\n\n # def getDict(self,dic):\n # print(dic.items())\n # return dic.items()\n\n\n def create_listen_socket(self):\n try:\n #create TCP server listen socket\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket.bind((Sender.HOSTNAME, Sender.CDP_PORT))\n # tcp_thread = thread.Thread(target=self.socket.listen(Server.BACKLOG))\n self.socket.listen(Sender.BACKLOG)\n print(\"WELCOME TO THE CRDS \\nListening for TCP connections on CDP Port {} ...\".format(Sender.CDP_PORT))\n print('-'*72)\n\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def 
connection_handler(self,client):\n connection,address = client\n while True:\n #read the command sent from client to server\n cmd = int.from_bytes(connection.recv(CMD_FIELD_LEN), byteorder='big')\n #print(cmd)\n if (cmd not in CMD.values()):\n print('Incorrect command received')\n break\n \n if (cmd==CMD['bye']):\n print('Closing {} client connection...'.format(address))\n print('-'*72)\n connection.close()\n break\n\n elif(cmd == CMD['getdir']):\n try:\n #print(\"in the server\")\n dicttoSend = json.dumps(self.chatroomDict)\n chatroomDict = dicttoSend.encode(Sender.MSG_ENCODING)\n connection.sendall(chatroomDict)\n print(\"Sending dictionary\")\n except socket.error:\n print(\"Closing client connection...\")\n connection.close()\n return\n\n elif(cmd == CMD['makeroom']):\n #decode message from client\n #print(\"decoding message\")\n room_bytes = connection.recv(Sender.RECV_SIZE)\n room_str = room_bytes.decode(Sender.MSG_ENCODING)\n #print(room_str)\n \n\n\n #check address/port combination is unique\n \n \n #print(list(self.chatroomDict.values()))\n #print(\"this ist the length\",len(self.chatroomDict))\n if len(self.chatroomDict)>0:\n for value in list(self.chatroomDict.values()):\n #print(\"inside for loop\")\n if value!= (room_str.split(\",\")[1],room_str.split(\",\")[2]):\n self.chatroomDict[room_str.split(\",\")[0]] = (room_str.split(\",\")[1],room_str.split(\",\")[2])\n else:\n print(\"address/port already exits\")\n else:\n self.chatroomDict[room_str.split(\",\")[0]] = (room_str.split(\",\")[1],room_str.split(\",\")[2])\n\n \n\n elif(cmd==CMD['deleteroom']):\n #print(\"Inside delete function\")\n deleteroom_bytes = connection.recv(Sender.RECV_SIZE)\n deleteroom_str = deleteroom_bytes.decode(Sender.MSG_ENCODING)\n print(deleteroom_str)\n print(self.chatroomDict)\n self.chatroomDict.pop(deleteroom_str)\n #print(\"POPPED OFF\")\n print(self.chatroomDict)\n \n\n\n\n elif (cmd==CMD['bye']):\n print(\"CLOSING CONNECTION\")\n #closing client connection to server but not server socket as its still listening\n connection.close()\n break\n\n\n\n def process_connections_forever(self):\n try:\n while True:\n new_client = self.socket.accept()\n new_thread = threading.Thread(target=self.connection_handler,args=(new_client,))\n self.thread_list.append(new_thread)\n #print(\"Starting serving thread:\", new_thread.name)\n print(\"Connected to the client\")\n\n new_thread.daemon = True\n new_thread.start()\n\n except Exception as msg:\n print(msg)\n except KeyboardInterrupt:\n print()\n finally:\n print(\"Closing server socket...\")\n self.socket.close()\n sys.exit(1)\n\n\n########################################################################\n# Echo Receiver class\n########################################################################\n\nclass Receiver:\n\n RECV_SIZE = 256\n MSG_ENCODING = 'utf-8'\n TTL = 1 # Hops\n TTL_SIZE = 1 # Bytes\n TTL_BYTE = TTL.to_bytes(TTL_SIZE, byteorder='big')\n TIMEOUT = 2\n\n def __init__(self):\n self.userName = \"\"\n self.chatroomName = \"\"\n self.flag = 0\n self.chatroomInfo = {}\n self.thread_list = []\n self.get_console_input()\n \n\n\n def tcp_connection(self):\n if self.connected ==1:\n self.get_socket()\n self.connect_to_server()\n #self.send_console_input_forever()\n\n def get_socket(self):\n try:\n #create TCP socket\n self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n except Exception as msg:\n print(msg)\n exit()\n\n def connect_to_server(self):\n try:\n #create TCP connection\n self.socket.connect((Sender.HOSTNAME, Sender.CDP_PORT))\n 
except Exception as msg:\n print(msg)\n exit()\n\n\n def getdir(self):\n #print(\"inside getdir\")\n #create packet and send to server\n getdir_field = CMD[\"getdir\"].to_bytes(CMD_FIELD_LEN,byteorder='big')\n self.socket.sendall(getdir_field)\n try:\n recvd_bytes = self.socket.recv(Receiver.RECV_SIZE)\n json_str = recvd_bytes.decode(Receiver.MSG_ENCODING)\n #print(json.loads(json_str))\n #print(type(json.loads(json_str)))\n test_dict = json.loads(json_str)\n self.chatroomInfo = dict(list(test_dict.items()))\n print(self.chatroomInfo)\n \n \n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def makeroom(self,roomName,address,port):\n makeroom_field = CMD[\"makeroom\"].to_bytes(CMD_FIELD_LEN,byteorder='big')\n roomName = roomName + \",\"\n roomNameField = roomName.encode(Receiver.MSG_ENCODING)\n address = address + \",\"\n addressField = address.encode(Receiver.MSG_ENCODING)\n portField = port.encode(Receiver.MSG_ENCODING)\n pkt = makeroom_field + roomNameField + addressField + portField\n #print(\"sending packet\")\n self.socket.sendall(pkt)\n\n\n def send_messages_forever(self,MULTICAST_ADDRESS,MULTICAST_PORT):\n try:\n while True:\n self.chat_text = input()\n MULTICAST_PORT = int(MULTICAST_PORT)\n MULTICAST_ADDRESS_PORT = (MULTICAST_ADDRESS, MULTICAST_PORT)\n if self.chat_text == \"~]\" :\n #send message to receive socket to signal it to finish\n killMsg = \"Sender thread closing\".encode(Receiver.MSG_ENCODING)\n self.socket_udp.sendto(killMsg,MULTICAST_ADDRESS_PORT)\n return\n nameField = self.userName.encode(Receiver.MSG_ENCODING)\n dashField = \":\".encode(Receiver.MSG_ENCODING)\n textField = self.chat_text.encode(Receiver.MSG_ENCODING)\n pkt = nameField + dashField + textField\n self.socket_udp.sendto(pkt,MULTICAST_ADDRESS_PORT)\n time.sleep(Sender.TIMEOUT)\n except KeyboardInterrupt:\n #print(\"There has been an error\",msg)\n print(\"exiting\")\n return\n\n\n def receive_forever(self):\n while True:\n try:\n data, address_port = self.socket_r.recvfrom(Receiver.RECV_SIZE)\n address, port = address_port\n if data.decode('utf-8') == \"Sender thread closing\":\n #print(\"Closing receive thread\")\n return\n print(\"\\n\")\n print(data.decode('utf-8'))\n # except Exception as msg:\n # print(\"There has been an error\",msg)\n # #sys.exit(1)\n except KeyboardInterrupt:\n print(\"exiting\")\n return\n \n def deleteroomFunc(self,room):\n delete_field = CMD['deleteroom'].to_bytes(CMD_FIELD_LEN,byteorder='big')\n room_field = room.encode(Receiver.MSG_ENCODING)\n pkt = delete_field + room_field\n self.socket.sendall(pkt)\n \n\n def bye(self):\n bye_field = CMD['bye'].to_bytes(CMD_FIELD_LEN,byteorder='big')\n self.socket.sendall(bye_field)\n print(\"Closing connection to the CRDS\")\n self.connected = 0\n #closing TCP socket and cannot do self.socket functions\n self.socket.close()\n #sys.exit(1)\n\n def chat(self):\n #print(\"Inside chat function\")\n #find multicast ip address and port from local dictionary \n MULTICAST_ADDRESS,MULTICAST_PORT = self.chatroomInfo[self.chatroomName]\n #create udp send socket\n try:\n self.socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket_udp.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, Receiver.TTL_BYTE)\n #print(\"Create the first socket\")\n except Exception as msg:\n print(msg)\n\n #create udp receive socket and multicast group request\n try:\n self.socket_r = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket_r.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)\n MULTICAST_PORT = 
int(MULTICAST_PORT)\n self.socket_r.bind((MULTICAST_ADDRESS,MULTICAST_PORT))\n multicast_group_bytes = socket.inet_aton(MULTICAST_ADDRESS)\n RX_IFACE_ADDRESS = \"0.0.0.0\"\n multicast_if_bytes = socket.inet_aton(RX_IFACE_ADDRESS)\n print(\"before multicast request\")\n multicast_request = multicast_group_bytes + multicast_if_bytes\n self.socket_r.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, multicast_request)\n print(\"created the second socket\")\n except Exception as msg:\n print (msg)\n\n #change command prompt\n print('\\n'+'*'*50)\n print(\"Entering chatroom\")\n\n # #Create listener thread to listen for ctrl key\n # def pass_to_thread(self,key):\n # print(key.char)\n\n # def dummyFunction(self):\n # return False\n\n # h = keyboard.Listener(on_press = pass_to_thread, on_release = dummyFunction)\n # #h.daemon = True\n # self.thread_list.append(h)\n # h.start()\n\n\n\n\n\n #Create UDP thread \n udp_thread_send = threading.Thread(target=self.send_messages_forever,args=(MULTICAST_ADDRESS,MULTICAST_PORT))\n self.thread_list.append(udp_thread_send)\n #udp_thread_send.daemon = True\n udp_thread_send.start()\n \n\n #create UDP thread \n udp_thread_receive = threading.Thread(target=self.receive_forever)\n self.thread_list.append(udp_thread_receive)\n #udp_thread_receive.daemon = True\n udp_thread_receive.start()\n\n #set flag back to zero and go back to console_input\n udp_thread_send.join()\n udp_thread_receive.join()\n self.socket_udp.close()\n self.socket_r.close()\n self.flag = 0\n self.get_console_input()\n \n\n\n\n\n def get_console_input(self):\n while self.flag!=1:\n print('\\n'+'*'*50)\n self.input_text = input(\"--(connect,getdir, makeroom, deleteroom, name , chat)--\\n Enter Command: \")\n try:\n if (self.input_text == \"connect\"):\n self.connected = 1\n self.tcp_connection()\n print(\"Connected to the CRDS\")\n\n except Exception as msg:\n print(msg)\n\n try:\n if(self.input_text == \"getdir\"):\n #print(\"detected\")\n self.getdir()\n except Exception as msg:\n print(msg)\n\n try:\n if(self.input_text.split()[0] == \"makeroom\"):\n if(len(self.input_text.split())<2):\n print('ENTER FORMAT: makeroom <chat room name> <address> <port>')\n break\n\n chatRoomName = self.input_text.split()[1]\n address = self.input_text.split()[2]\n port = self.input_text.split()[3]\n\n self.makeroom(chatRoomName,address,port)\n\n except Exception as msg:\n print(msg)\n\n try:\n if(self.input_text.split()[0] == \"deleteroom\"):\n deleteroomName = self.input_text.split()[1]\n self.deleteroomFunc(deleteroomName)\n except Exception as msg:\n print(msg)\n\n\n try:\n if(self.input_text.split()[0] == \"chat\"):\n # chatRoomName = self.input_text.split()[1]\n # print(\"This si the name of the chatroom\",chatRoomName)\n # self.chat(chatRoomName)\n self.chatroomName = self.input_text.split()[1]\n self.flag = 1\n except Exception as msg:\n print(msg)\n\n try:\n if(self.input_text.split()[0] == \"name\"):\n self.userName = self.input_text.split()[1]\n print(\"This is the name entered\",self.userName)\n except Exception as msg:\n print(msg)\n\n try:\n if(self.input_text.split()[0] == \"bye\"):\n self.bye()\n except Exception as msg:\n print(msg)\n\n print(\"proceeding to chat function\")\n self.chat()\n\n\n\n\n########################################################################\n# Process command line arguments if run directly.\n########################################################################\n\nif __name__ == '__main__':\n roles = {'receiver': Receiver,'sender': Sender}\n parser = 
argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role',\n choices=roles, \n help='sender or receiver role',\n required=True, type=str)\n\n args = parser.parse_args()\n roles[args.role]()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4953168034553528, "alphanum_fraction": 0.5013774037361145, "avg_line_length": 32.164634704589844, "blob_id": "ad4803e372c092ce309b35108b503912e30f2807", "content_id": "e9fef34c58ab21cb25415e5c46ca2cb146473a96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5445, "license_type": "no_license", "max_line_length": 86, "num_lines": 164, "path": "/lab3/templates/EchoClientServer_UDP.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\nimport time\n\n########################################################################\n# Echo Server class\n########################################################################\n\nclass Server:\n\n HOSTNAME = \"0.0.0.0\" \n # HOSTNAME = 'localhost' \n # HOSTNAME = socket.gethostbyname('')\n PORT = 50000\n\n RECV_SIZE = 256\n BACKLOG = 10\n \n MSG_ENCODING = \"utf-8\"\n\n def __init__(self):\n self.create_listen_socket()\n self.process_messages_forever()\n\n def create_listen_socket(self):\n try:\n # Create an IPv4 UDP socket.\n self.socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Set socket layer socket options.\n self.socket_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to socket address, i.e., IP address and\n # port. Unlike TCP, there is no listen/accept. We can just\n # receive on the bound port.\n self.socket_udp.bind( (Server.HOSTNAME, Server.PORT) )\n print(\"Listening on port {} ...\".format(Server.PORT))\n\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def process_messages_forever(self):\n try:\n while True:\n # Do a recvfrom in order to obtain the identity of the\n # sender of the incoming packet.\n self.message_handler(self.socket.recvfrom(Server.RECV_SIZE))\n\n except Exception as msg:\n print(msg)\n except KeyboardInterrupt:\n print()\n finally:\n print(\"Closing server socket ... 
\")\n self.socket.close()\n sys.exit(1)\n\n def message_handler(self, client):\n # recvfrom returns the contents of the received segment and\n # the identity of the sender.\n msg_bytes, address_port = client\n msg = msg_bytes.decode(Server.MSG_ENCODING)\n print(\"-\" * 72)\n print(\"Message received from {}.\".format(address_port))\n print(\"Received Message Bytes: \", msg_bytes)\n print(\"Decoded Message: \", msg)\n # time.sleep(20) # for attacker.\n\n # Echo the received bytes back to the sender.\n self.socket.sendto(msg_bytes, address_port)\n print(\"Echoed Message: \", msg)\n # print(\"Encoded Echoed Message Bytes: \", msg_bytes)\n\n########################################################################\n# Echo Client class\n########################################################################\n\nclass Client:\n\n SERVER_ADDRESS_PORT = ('localhost', Server.PORT)\n RECV_SIZE = 256\n\n def __init__(self):\n self.get_socket()\n self.send_console_input_forever()\n\n def get_socket(self):\n try:\n # Create an IPv4 UDP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Set socket layer socket options.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def get_console_input(self):\n # In this version we keep prompting the user until a non-blank\n # line is entered.\n while True:\n self.input_text = input(\"Input: \")\n if self.input_text != '':\n self.input_text_encoded = self.input_text.encode(Server.MSG_ENCODING)\n break\n \n def send_console_input_forever(self):\n while True:\n try:\n self.get_console_input()\n self.message_send()\n self.message_receive()\n except (KeyboardInterrupt, EOFError):\n print()\n print(\"Closing client socket ...\")\n self.socket.close()\n sys.exit(1)\n \n def message_send(self):\n try:\n # sendto takes the bytes to be sent and the identity of\n # the destination.\n self.socket.sendto(self.input_text_encoded, Client.SERVER_ADDRESS_PORT)\n # print(\"Sent Message: \", self.input_text)\n # print(\"Sent Message Bytes: \", self.input_text_encoded)\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def message_receive(self):\n try:\n # recvfrom returns bytes received and the identity of the\n # sender.\n recvd_bytes, address = self.socket.recvfrom(Client.RECV_SIZE)\n # print(\"Received Message Bytes: \", recvd_bytes)\n print(\"Received Message: \", recvd_bytes.decode(Server.MSG_ENCODING))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n########################################################################\n# Process command line arguments if run directly.\n########################################################################\n\nif __name__ == '__main__':\n roles = {'client': Client,'server': Server}\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role',\n choices=roles, \n help='server or client role',\n required=True, type=str)\n\n args = parser.parse_args()\n roles[args.role]()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.44700339436531067, "alphanum_fraction": 0.4598272740840912, "avg_line_length": 30.270492553710938, "blob_id": "968dedf9ad6296412bf72ffc8934cedb2dd7c897", "content_id": "e39f422135f0e2a0f1d887a67b7425d515ce9425", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3821, "license_type": "no_license", "max_line_length": 84, "num_lines": 122, "path": 
"/lab3/templates/broadcast_send_receive.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\nimport time\n\n########################################################################\n# Broadcast Server class\n########################################################################\n\nclass Sender:\n\n # HOSTNAME = socket.gethostbyname('')\n # HOSTNAME = 'localhost'\n HOSTNAME = '0.0.0.0'\n\n # Send the broadcast packet periodically. Set the period\n # (seconds).\n BROADCAST_PERIOD = 2\n\n # Define the message to broadcast.\n MSG_ENCODING = \"utf-8\"\n MESSAGE = \"Hello from \" + HOSTNAME \n MESSAGE_ENCODED = MESSAGE.encode('utf-8')\n\n # Use the broadcast-to-everyone IP address or a directed broadcast\n # address. Define a broadcast port.\n BROADCAST_ADDRESS = \"255.255.255.255\" \n # BROADCAST_ADDRESS = \"192.168.1.255\"\n BROADCAST_PORT = 30000\n ADDRESS_PORT = (BROADCAST_ADDRESS, BROADCAST_PORT)\n\n def __init__(self):\n self.create_sender_socket()\n self.send_broadcasts_forever()\n\n def create_sender_socket(self):\n try:\n # Set up a UDP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n ############################################################\n # Set the option for broadcasting.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n ############################################################ \n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def send_broadcasts_forever(self):\n try:\n while True:\n print(\"Sending to {} ...\".format(Sender.ADDRESS_PORT))\n self.socket.sendto(Sender.MESSAGE_ENCODED, Sender.ADDRESS_PORT)\n time.sleep(Sender.BROADCAST_PERIOD)\n except Exception as msg:\n print(msg)\n except KeyboardInterrupt:\n print()\n finally:\n self.socket.close()\n sys.exit(1)\n\n########################################################################\n# Echo Receiver class\n########################################################################\n\nclass Receiver:\n\n RECV_SIZE = 256\n\n HOST = \"0.0.0.0\"\n\n ADDRESS_PORT = (HOST, Sender.BROADCAST_PORT)\n\n def __init__(self):\n self.get_socket()\n self.receive_forever()\n\n def get_socket(self):\n try:\n # Create an IPv4 UDP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Bind to all interfaces and the agreed on broadcast port.\n self.socket.bind(Receiver.ADDRESS_PORT)\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def receive_forever(self):\n while True:\n try:\n data, address = self.socket.recvfrom(Receiver.RECV_SIZE)\n print(\"Broadcast received: \", \n data.decode('utf-8'), address)\n except KeyboardInterrupt:\n print(); exit()\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n########################################################################\n# Process command line arguments if run directly.\n########################################################################\n\nif __name__ == '__main__':\n roles = {'receiver': Receiver,'sender': Sender}\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role',\n choices=roles, \n help='sender or receiver role',\n required=True, type=str)\n\n args = parser.parse_args()\n roles[args.role]()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.4799249470233917, "alphanum_fraction": 0.48818010091781616, 
"avg_line_length": 31.426828384399414, "blob_id": "f2d774c82fc1e7892c31bee8635c456205c22473", "content_id": "753d3f3af132902eef1db4e032f02903d0b5c275", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2665, "license_type": "no_license", "max_line_length": 94, "num_lines": 82, "path": "/lab3/templates/service_announcement.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\nimport time\nimport datetime\n\n########################################################################\n# Service Discovery Server\n#\n# The server listens on a UDP socket. When a service discovery packet\n# arrives, it returns a response with the name of the service.\n# \n########################################################################\n\nclass Server:\n\n ALL_IF_ADDRESS = \"0.0.0.0\"\n SERVICE_SCAN_PORT = 30000\n ADDRESS_PORT = (ALL_IF_ADDRESS, SERVICE_SCAN_PORT)\n\n MSG_ENCODING = \"utf-8\" \n \n SCAN_CMD = \"SCAN\"\n SCAN_CMD_ENCODED = SCAN_CMD.encode(MSG_ENCODING)\n \n MSG = \"SID-ABI-SAHAJ's File Sharing Service\"\n MSG_ENCODED = MSG.encode(MSG_ENCODING)\n\n RECV_SIZE = 1024\n BACKLOG = 10\n\n def __init__(self):\n self.create_socket()\n self.receive_forever()\n\n def create_socket(self):\n try:\n # Create an IPv4 UDP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # Get socket layer socket options.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to socket address, i.e., IP address and port.\n self.socket.bind( (Server.ALL_IF_ADDRESS, Server.SERVICE_SCAN_PORT) )\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def receive_forever(self):\n while True:\n try:\n print(Server.MSG, \"listening on port {} ...\".format(Server.SERVICE_SCAN_PORT))\n recvd_bytes, address = self.socket.recvfrom(Server.RECV_SIZE)\n\n print(\"Received: \", recvd_bytes.decode('utf-8'), \" Address:\", address)\n \n # Decode the received bytes back into strings.\n recvd_str = recvd_bytes.decode(Server.MSG_ENCODING)\n\n # Check if the received packet contains a service scan\n # command.\n if Server.SCAN_CMD in recvd_str:\n # Send the service advertisement message back to\n # the client.\n self.socket.sendto(Server.MSG_ENCODED, address)\n except KeyboardInterrupt:\n print()\n sys.exit(1)\n\n########################################################################\n# Process command line arguments if run directly.\n########################################################################\n\nif __name__ == '__main__':\n Server()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6879505515098572, "alphanum_fraction": 0.7116374969482422, "avg_line_length": 31.366666793823242, "blob_id": "5b6a648c9c3d8fa58b7d64c83f4b07448596c7f9", "content_id": "05bd1a701137d01d756589da7c178fa304519730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 971, "license_type": "no_license", "max_line_length": 81, "num_lines": 30, "path": "/lab2/commands.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "import getpass\nimport hashlib\n\n# 5 commands for the user to input from client \n# TCP connection created upon one of these get average commands -> printed on cmd\nGET_MIDTERM_AVG_CMD = 
\"GMA\"\nGET_LAB_1_AVG_CMD = \"GL1A\"\nGET_LAB_2_AVG_CMD = \"GL2A\"\nGET_LAB_3_AVG_CMD = \"GL3A\"\nGET_LAB_4_AVG_CMD = \"GL4A\"\n\n# if not one of 5 commands --> interpreted as HASHED ID/password \n\n# GET GRADES COMMAND - receive a specific student's grades \nGET_GRADES = \"GG\"\n# input id/pass from client \nid_num = input(\"ID Number: \")\npwd = getpass.getpass() # automatically prints password \n# TCP CONNECTION TO SERVER !!! \n# send message to server with secure hash of entered ID & pass \n# hashedID = hashlib.sha256(id_num.encode('utf-8')).digest()\n# hashedPass = hashlib.sha256(password.encode('utf-8')).digest()\n# hashboth = hashedID+hashedPass\n# print(hashedID)\n# print(hashedPass)\n# print(hashboth)\nm = hashlib.sha256()\nm.update(id_num.encode('utf-8'))\nm.update(pwd.encode('utf-8'))\nprint(m.digest())\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 24, "blob_id": "89751d63ac058afd76736bb004ba7b7b3458fb13", "content_id": "2982c457f1cf2b5992f4325c0b1fa94b0508e265", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 24, "license_type": "no_license", "max_line_length": 24, "num_lines": 1, "path": "/lab4/README.md", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "# Lab04-4DN4---Chat-Room" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.795918345451355, "avg_line_length": 23.5, "blob_id": "66d806d0b79dd9a54925acfca38db6204702e9ec", "content_id": "edc4f10db2e432c709909de75b38dd620a46d99a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 98, "license_type": "no_license", "max_line_length": 47, "num_lines": 4, "path": "/lab2/README.md", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "# lab2AZ\nGradeRetrieval.py -r server and client to run \n\nechoclientserver.py there for reference\n" }, { "alpha_fraction": 0.501052975654602, "alphanum_fraction": 0.512284517288208, "avg_line_length": 34.30303192138672, "blob_id": "625dbafb8a41f8317366e25c0f3e38704567a246", "content_id": "19dd221a2e1adddac7e5302ffd10ec2d2acd815b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12821, "license_type": "no_license", "max_line_length": 129, "num_lines": 363, "path": "/lab2/GradeRetrieval.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\n\"\"\"\nEcho Client and Server Classes\n\nT. D. Todd\nMcMaster University\n\nto create a Client: \"python EchoClientServer.py -r client\" \nto create a Server: \"python EchoClientServer.py -r server\" \n\nor you can import the module into another file, e.g., \nimport EchoClientServer\n\n\"\"\"\n\n########################################################################\n\nimport socket\nimport argparse\nimport sys\nimport getpass\nimport hashlib\n\n########################################################################\n# Echo Server class\n########################################################################\n\nclass Server:\n\n # Set the server hostname used to define the server socket address\n # binding. Note that 0.0.0.0 or \"\" serves as INADDR_ANY. 
i.e.,\n # bind to all local network interface addresses.\n HOSTNAME = \"0.0.0.0\"\n## HOSTNAME = \"127.0.0.1\"\n \n\n # Set the server port to bind the listen socket to.\n PORT = 50000\n\n RECV_BUFFER_SIZE = 1024\n MAX_CONNECTION_BACKLOG = 10\n \n MSG_ENCODING = \"utf-8\"\n\n # Create server socket address. It is a tuple containing\n # address/hostname and port.\n SOCKET_ADDRESS = (HOSTNAME, PORT)\n\n # AVG_COMMANDS = {\"GMA\":\"MIDTERM\",\"GL1A\":\"LAB 1\",\"GL2A\":\"LAB 2\",\"GL3A\":\"LAB 3\",\"GL4A\":\"LAB 4\"}\n\n def __init__(self):\n self.printData()\n self.create_listen_socket()\n self.process_connections_forever()\n\n def printData(self):\n self.midtermAverage = 0\n self.lab1Average = 0\n self.lab2Average = 0\n self.lab3Average = 0\n self.lab4Average = 0\n self.hashedlist = ['.']\n counter = 0\n with open(\"./course_grades_2021.csv\",\"r\")as file:\n print(\"Data read from the CSV file\")\n # print(file.read())\n #[cleaned_line for cleaned_line in [line.strip() for line in file.readlines()] if cleaned_line != '']\n lines = file.readlines() #each row is an item inside one big list\n for line in lines:\n counter+=1\n line = line.split(',') #[,,,,,] become [][][]\n line = [i.strip() for i in line]\n #creation dictionary\n if counter>1:\n self.hashedlist.append(self.hashed_ID_Pass(line[0],line[1]))\n if counter == 12:\n self.midtermAverage = line[4]\n self.lab1Average= line[5]\n self.lab2Average=line[6]\n self.lab3Average=line[7]\n self.lab4Average=line[8]\n \n def hashed_ID_Pass(self,id_num,pwd):\n m = hashlib.sha256()\n m.update(id_num.encode('utf-8'))\n m.update(pwd.encode('utf-8'))\n return m.digest() \n \n def getMidtermAverage(self):\n return str(self.midtermAverage)\n\n def getLabAverage(self,labno):\n if labno == 1:\n return self.lab1Average\n if labno == 2:\n return self.lab2Average\n if labno == 3:\n return self.lab3Average\n if labno == 4:\n return self.lab4Average\n\n def create_listen_socket(self):\n try:\n # Create an IPv4 TCP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Set socket layer socket options. This allows us to reuse\n # the socket without waiting for any timeouts.\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # Bind socket to socket address, i.e., IP address and port.\n self.socket.bind(Server.SOCKET_ADDRESS)\n\n # Set socket to listen state.\n self.socket.listen(Server.MAX_CONNECTION_BACKLOG)\n \n # display output of csv file on server \n print('\\nData read from CSV File:\\n')\n self.display_csv()\n\n print(\"\\nListening for connections on port {} ...\".format(Server.PORT))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n \n def display_csv(self):\n filename = 'course_grades_2021.csv'\n file = open(filename,'r')\n data = [] \n for line in file.readlines():\n print(line.rstrip('\\n'))\n\n def process_connections_forever(self):\n try:\n while True:\n # Block while waiting for accepting incoming\n # connections. When one is accepted, pass the new\n # (cloned) socket reference to the connection handler\n # function.\n self.connection_handler(self.socket.accept())\n except Exception as msg:\n print(msg)\n except KeyboardInterrupt:\n print()\n finally:\n self.socket.close()\n sys.exit(1)\n\n def connection_handler(self, client):\n connection, address_port = client\n print(\"-\" * 72)\n print(\"Connection received from {}.\".format(address_port))\n\n while True:\n try:\n # Receive bytes over the TCP connection. 
This will block\n # until \"at least 1 byte or more\" is available.\n recvd_bytes = connection.recv(Server.RECV_BUFFER_SIZE)\n \n # If recv returns with zero bytes, the other end of the\n # TCP connection has closed (The other end is probably in\n # FIN WAIT 2 and we are in CLOSE WAIT.). If so, close the\n # server end of the connection and get the next client\n # connection.\n if len(recvd_bytes) == 0:\n print(\"Closing client connection ... \")\n connection.close()\n break\n \n # Decode the received bytes back into strings. Then output\n # them.\n try:\n recvd_str = recvd_bytes.decode(Server.MSG_ENCODING)\n except:\n recvd_str = recvd_bytes\n\n print(\"Command received from client: \", recvd_str)\n\n #CALL FUNCTIONS\n if recvd_str == \"GMA\":\n sentStr = self.getMidtermAverage()\n elif recvd_str == \"GL1A\":\n sentStr = self.getLabAverage(1)\n elif recvd_str == \"GL2A\":\n sentStr = self.getLabAverage(2)\n elif recvd_str == \"GL3A\":\n sentStr = self.getLabAverage(3)\n elif recvd_str == \"GL4A\":\n sentStr = self.getLabAverage(4)\n else:\n if recvd_str in self.hashedlist:\n #verified the identity\n print(\"Correct password,record found\")\n index = self.hashedlist.index(recvd_str)\n with open(\"./course_grades_2021.csv\",'r')as file:\n sentStr=list(file.readlines())[index]\n \n else:\n print(\"Password Failure\")\n sentStr = \"ID/Password Failure\"\n\n\n\n connection.sendall(sentStr.encode(Server.MSG_ENCODING))\n\n # if recvd_bytes in AVG_COMMANDS.keys():\n # print(\"Received command {} from client\".format(recvd_bytes))\n\n \n # Send the received bytes back to the client.\n #connection.sendall(recvd_bytes)\n print(\"Sent: \", sentStr)\n\n except KeyboardInterrupt:\n print()\n print(\"Closing client connection ... \")\n connection.close()\n break\n\n########################################################################\n# Echo Client class\n########################################################################\n\nclass Client:\n\n # Set the server hostname to connect to. 
If the server and client\n # are running on the same machine, we can use the current\n # hostname.\n # SERVER_HOSTNAME = socket.gethostbyname('localhost')\n# SERVER_HOSTNAME = socket.gethostbyname('')\n # SERVER_HOSTNAME = '0.0.0.0'\n## SERVER_HOSTNAME = 'localhost'\n SERVER_HOSTNAME = '127.0.0.1'\n \n\n \n RECV_BUFFER_SIZE = 1024\n \n\n AVG_COMMANDS = {\"GMA\":\"MIDTERM\",\"GL1A\":\"LAB 1\",\"GL2A\":\"LAB 2\",\"GL3A\":\"LAB 3\",\"GL4A\":\"LAB 4\"}\n\n\n def __init__(self):\n self.get_socket()\n self.connect_to_server()\n self.send_console_input_forever()\n \n\n def get_socket(self):\n try:\n # Create an IPv4 TCP socket.\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def connect_to_server(self):\n try:\n # Connect to the server using its socket address tuple.\n self.socket.connect((Client.SERVER_HOSTNAME, Server.PORT))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def get_console_input(self):\n self.flag = 0\n # In this version we keep prompting the user until a non-blank\n # line is entered.\n while True:\n self.input_text = input(\"Enter command (GMA,GL1A,GL2A,GL3A,GL4A,GG): \")\n print(\"Command Entered: \", self.input_text)\n # get grades input \n if self.input_text == \"GG\":\n # ask for ID and password\n id_num = input(\"Enter ID: \")\n pwd = getpass.getpass()\n # hash ID and Passwrod\n self.input_text = self.hashed_ID_Pass(id_num, pwd)\n # print(self.flag)\n self.flag = 1\n if self.input_text != \"\":\n break\n\n def hashed_ID_Pass(self,id_num,pwd):\n m = hashlib.sha256()\n m.update(id_num.encode('utf-8'))\n m.update(pwd.encode('utf-8'))\n return m.digest()\n \n def send_console_input_forever(self):\n while True:\n try:\n self.get_console_input()\n self.connection_send()\n self.connection_receive()\n except (KeyboardInterrupt, EOFError):\n print()\n print(\"Closing server connection ...\")\n self.socket.close()\n sys.exit(1)\n \n def connection_send(self):\n try:\n # Send string objects over the connection. The string must\n # be encoded into bytes objects first.\n if self.flag == 1:\n self.socket.sendall(self.input_text)\n else:\n self.socket.sendall(self.input_text.encode(Server.MSG_ENCODING))\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n def connection_receive(self):\n try:\n # Receive and print out text. The received bytes objects\n # must be decoded into string objects.\n recvd_bytes = self.socket.recv(Client.RECV_BUFFER_SIZE)\n\n # recv will block if nothing is available. If we receive\n # zero bytes, the connection has been closed from the\n # other end. In that case, close the connection on this\n # end and exit.\n if len(recvd_bytes) == 0:\n print(\"Closing server connection ... 
\")\n self.socket.close()\n sys.exit(1)\n \n assignment = \"\"\n if self.input_text in list(Client.AVG_COMMANDS.keys()):\n assignment = Client.AVG_COMMANDS.get(self.input_text) + \" AVERAGE\"\n else:\n assignment = \"USER INFO\"\n print(\"Fetching {} from Server: {}\".format(assignment, recvd_bytes.decode(Server.MSG_ENCODING)))\n # print(\"Fetching {} average: {}\".format(AVG_COMMANDS.get(self.input_text), recvd_bytes.decode(Server.MSG_ENCODING)))\n\n except Exception as msg:\n print(msg)\n sys.exit(1)\n\n########################################################################\n# Process command line arguments if this module is run directly.\n########################################################################\n\n# When the python interpreter runs this module directly (rather than\n# importing it into another file) it sets the __name__ variable to a\n# value of \"__main__\". If this file is imported from another module,\n# then __name__ will be set to that module's name.\n\nif __name__ == '__main__':\n roles = {'client': Client,'server': Server}\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-r', '--role',\n choices=roles, \n help='server or client role',\n required=True, type=str)\n\n args = parser.parse_args()\n roles[args.role]()\n\n########################################################################\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5859802961349487, "alphanum_fraction": 0.6155531406402588, "avg_line_length": 20.23255729675293, "blob_id": "68bbde900997045e51d22224877296fc0872bf83", "content_id": "dea94359cdc70d2bdea1fe8499176551093bdb9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 913, "license_type": "no_license", "max_line_length": 80, "num_lines": 43, "path": "/lab2/extractData.py", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "\n# try:\n# file = open(filename,'r')\n# # except FileNotFoundError:\n# # print(\"Creating database:{}\".format(filename))\n# # # file = open(filename, 'w')\n\n\nfilename = 'course_grades_2021.csv'\n\nfile = open(filename,'r')\n\ndata = [] \nfor line in file.readlines():\n # values = line.rstrip('\\n').split(\",\")\n # data.append(values)\n print(line.rstrip('\\n'))\n\nidNum = [] \npassword = [] \nlastName = [] \nfirstName = [] \nmidterm = []\nlab1 = []\nlab2 = []\nlab3 = []\nlab4 = []\n\nfor record in data[1:]:\n idNum.append(record[0]) \n password.append(record[1])\n lastName.append(record[2]) \n firstName.append(record[3]) \n midterm.append(record[4])\n lab1.append(record[5])\n lab2.append(record[6])\n lab3.append(record[7])\n lab4.append(record[8])\n\nkeys = data[0]\nvalues = [idNum, password, lastName, firstName, midterm, lab1, lab2, lab3, lab4]\n\nrecords = dict(zip(keys,values))\nprint(records)" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.7250000238418579, "avg_line_length": 12.333333015441895, "blob_id": "7c63c77fefb0af2439ac82f1e73110f4dff41a55", "content_id": "2958af91e6b4f8142423cfe4e61153735acdc79d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 28, "num_lines": 3, "path": "/lab3/README.md", "repo_name": "siddeshbist/4dn4-InternetCommunication", "src_encoding": "UTF-8", "text": "# lab3-dn\n\nlab3_FTP.py is final version\n" } ]
12
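The 4dn4 client above sends its "GG" (get grades) request as a raw 32-byte SHA-256 digest of the student ID concatenated with the password, and the `self.flag` switch decides whether `sendall` transmits those raw bytes or an encoded command string. A minimal standalone sketch of that digest step follows; the server side is not included in this record and is assumed to hash each CSV credential pair the same way before comparing:

import hashlib

def credential_digest(id_num: str, pwd: str) -> bytes:
    """SHA-256 over the concatenated ID and password, mirroring hashed_ID_Pass."""
    m = hashlib.sha256()
    m.update(id_num.encode('utf-8'))  # feeding two chunks hashes their concatenation
    m.update(pwd.encode('utf-8'))
    return m.digest()                 # 32 raw bytes, sent over the socket unencoded

token = credential_digest('1234567', 'pa55w0rd')  # hypothetical credentials
print(len(token), token.hex())                    # -> 32 <hex digest>

Sending the fixed-length digest rather than the plaintext pair lets the server authenticate with a simple byte comparison, without the password ever crossing the socket.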
WhatIfWeDigDeeper/nlpia
https://github.com/WhatIfWeDigDeeper/nlpia
932f9eac54ac3ee3630b6eab73083b2fadb744f6
b674815b0a2da3241ee46f64447b064cb26837d6
536fcebbf2c76fc0443d4eb2e9364bf706d720e1
refs/heads/master
2021-07-15T14:43:48.829646
2017-09-27T00:08:44
2017-09-27T00:08:44
105,465,247
0
0
null
2017-10-01T18:46:10
2017-09-24T15:23:02
2017-09-27T00:10:10
null
[ { "alpha_fraction": 0.6979655623435974, "alphanum_fraction": 0.7183098793029785, "avg_line_length": 29.428571701049805, "blob_id": "459b8f30d1b131a5e0ff3ad00b241749dd5a55ef", "content_id": "92e5cb9dff614f2fa8f3ef15a0cc47a0ca70c3ab", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "permissive", "max_line_length": 82, "num_lines": 21, "path": "/nlpia/data/horse_plot.py", "repo_name": "WhatIfWeDigDeeper/nlpia", "src_encoding": "UTF-8", "text": "from __future__ import print_function, unicode_literals, division, absolute_import\nfrom future import standard_library\nstandard_library.install_aliases() # noqa: Counter, OrderedDict\nfrom builtins import * # noqa\nfrom past.builtins import basestring # noqa\n\nimport matplotlib.pyplot as plt # 'from seaborn import plt' relied on a private alias later removed from seaborn\nfrom mpl_toolkits.mplot3d import Axes3D # noqa\n\nimport pandas as pd\nimport numpy as np # noqa: 'pd.np' was removed from pandas\n\n\nh = pd.read_csv('pointcloud.csv.gz', header=None).values[:, :3]\nh = pd.DataFrame(h, columns='x y z'.split())\nh = h.sample(1000)\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nax.scatter(h.x, h.y, h.z, zdir='z', s=20, c=None, depthshade=True)\nplt.show()\n" } ]
1
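Each record in this dump repeats one field layout: repository metadata (repo_name, repo_url, snapshot/revision ids, star and fork counts), then a files list whose entries carry per-file statistics plus the full source text, then a num_files count like the `1` above. A minimal loader sketch, assuming the dump is serialized as one JSON object per line; `repos.jsonl` is a hypothetical filename, and both should be adjusted if the dump is actually a single JSON array:

import json

def iter_records(path):
    """Yield one repository record per non-empty line of a JSON-lines dump."""
    with open(path, encoding='utf-8') as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

# Example: list every Python file with its recorded size.
for rec in iter_records('repos.jsonl'):
    for f in rec['files']:
        if f.get('language') == 'Python':
            print(rec['repo_name'], f['path'], f['length_bytes'])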
jjwindow/PyFields
https://github.com/jjwindow/PyFields
21d720bc0e6e3dafe64d41b765f581c3b0e4b1e6
48e78ebc82a93514653dbc4de89f1b0fb8b9a20d
bec3496cd6adeaba3a702adbafb7f04472f6a85e
refs/heads/main
2023-03-05T04:18:52.649792
2021-02-19T16:22:31
2021-02-19T16:22:31
303,692,092
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.595643937587738, "alphanum_fraction": 0.6051136255264282, "avg_line_length": 24.16666603088379, "blob_id": "2e6d9fb5e86c49e314c6c34e3cdf39139dace11c", "content_id": "4af8a5ca1a6485166d98e7d4997e0b991ff70ff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1056, "license_type": "no_license", "max_line_length": 78, "num_lines": 42, "path": "/high_res_all_moons.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "from all_funcs import save_moon_trace\nfrom multiprocessing import Pool\nfrom timeit import default_timer as timer\n\n# print(\"Start Unparallel\")\n# start = timer()\n# all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']\n# num_fieldlines = 0\n# num_orbit_points = 2\n\n# for moon in all_moons:\n# if moon == 'Triton':\n# num_orbits = 2\n# else:\n# num_orbits = 1\n# save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines)\n\n# print(\"End Unparallel\")\n# end = timer()\n# print(end-start)\n\ndef run(moon):\n num_fieldlines = 0\n num_orbit_points = 500\n if moon == 'Triton':\n num_orbits = 2\n else:\n num_orbits = 1\n save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines)\n\ndef main():\n all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']\n print(\"Start Parallel\")\n start = timer()\n with Pool() as pool:\n pool.map(run, all_moons)\n end = timer()\n print(\"End Parallel\")\n print(end-start)\n\n# if __name__ == '__main__':\n# main()" }, { "alpha_fraction": 0.5658383369445801, "alphanum_fraction": 0.5855782628059387, "avg_line_length": 34.730289459228516, "blob_id": "b6cbbcaa36085223251ed6cdadb9da670f35ea66", "content_id": "b353e903a0287f355ed853af35da38900df77618", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8612, "license_type": "no_license", "max_line_length": 236, "num_lines": 241, "path": "/footpoint_analysis.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "from all_funcs import *\nfrom matplotlib import animation\n\nall_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton'] \n\ndef plot_ang_dev_timeseries(moon, trueFoot_f, trueFoot_b, footpoints_f, footpoints_b, time):\n angs_array = angular_deviation(footpoints_f, trueFoot_f, footpoints_b, trueFoot_b)\n fignames = [f\"{moon} Forwards Angular Deviation\", f\"{moon} Forwards Latitude Deviation\", f\"{moon} Forwards Longitude Deviation\", f\"{moon} Backwards Angular Deviation\", \"Backwards Latitude Deviation\", \"Backwards Longitude Deviation\"]\n T_m, T_p = moon_selector(moon, 'T', 'parent_day')\n T_rel = abs(T_m*T_p/(T_m-T_p))\n time = time/T_rel\n ax = plt.subplot(1,1,1)\n n = int(len(fignames)/2)\n for i, (f_array, name) in enumerate(zip(angs_array, fignames)):\n ax.clear()\n if i >= n:\n break\n f_array = [ang for (pos, ang) in f_array]\n ax.plot(time, f_array, label = name)\n b_array = angs_array[i+3]\n b_array = [ang for (pos, ang) in b_array]\n ax.plot(time, b_array, label = fignames[i+3])\n\n ax.legend()\n plt.show()\n\ndef ang_devs_timeseries(moon_arr):\n\n for i, moon in enumerate(moon_arr):\n\n if moon == 'Miranda':\n orb = 1.05\n elif moon == 'Triton':\n orb = 2\n else:\n orb = 1\n\n with open(f'{moon}/trueFoot_f_40_{orb}_100.npy', 'rb') as file:\n trueFoot_f_arr = np.load(file, allow_pickle=True)\n with open(f'{moon}/trueFoot_b_40_{orb}_100.npy', 'rb') as file:\n trueFoot_b_arr = np.load(file, allow_pickle=True)\n with 
open(f'{moon}/footpoints_f_40_{orb}_100.npy', 'rb') as file:\n footpoints_f_arr = np.load(file, allow_pickle=True)\n with open(f'{moon}/footpoints_b_40_{orb}_100.npy', 'rb') as file:\n footpoints_b_arr = np.load(file, allow_pickle=True)\n with open(f'{moon}/time_40_{orb}_100.npy', 'rb') as file:\n time = np.load(file, allow_pickle=True)\n\n plot_ang_dev_timeseries(moon, trueFoot_f_arr, trueFoot_b_arr, footpoints_f_arr, footpoints_b_arr, time)\n\n# ang_devs_timeseries(all_moons)\n\n##################### DATA RETRIEVAL ##########################\n\ndef get_coarse_moon_footpoints(moon, n_o_p, n_o):\n \"\"\"\n Get specific data for moon with n_o_p number points per orbit and n_o \n number of orbits.\n \"\"\"\n with open(f'{moon}/trueFoot_f_{n_o_p}_{n_o}_100.npy', 'rb') as file:\n trueFoot_f_arr = np.load(file, allow_pickle=True)\n with open(f'{moon}/trueFoot_b_{n_o_p}_{n_o}_100.npy', 'rb') as file:\n trueFoot_b_arr = np.load(file, allow_pickle=True)\n with open(f'{moon}/time_{n_o_p}_{n_o}_100.npy', 'rb') as file:\n time = np.load(file, allow_pickle=True)\n return trueFoot_f_arr, trueFoot_b_arr, time\n\ndef get_final_moon_footpoints(moon):\n \"\"\"\n Gets forward, backward and time arrays from final (n=500) runs.\n \"\"\"\n if moon == 'Triton':\n num_orbits = 2\n else:\n num_orbits = 1\n with open(f\"Finals/{moon}/trueFoot_f_500_{num_orbits}.npy\", 'rb') as file:\n trueFoot_f_arr = np.load(file, allow_pickle=True)\n with open(f\"Finals/{moon}/trueFoot_b_500_{num_orbits}.npy\", 'rb') as file:\n trueFoot_b_arr = np.load(file, allow_pickle=True)\n with open(f'Finals/{moon}/time_500_{num_orbits}.npy', 'rb') as file:\n time = np.load(file, allow_pickle=True)\n return trueFoot_f_arr, trueFoot_b_arr, time\n\n################# ANIMATIONS #######################\n\ndef animate_footpoints(moon, footpoints, delay):\n \"\"\"\n footpoints = (trueFoot_f_arr, trueFoot_b_arr, time)\n delay (ms) - interval passed to animate\n \"\"\"\n (trueFoot_f_arr, trueFoot_b_arr, time) = footpoints\n P = max(time)\n # Setup figure axes\n fig, ax = plt.subplots()\n ax.set_xlabel(r\"Longitude ($^{\\circ}$)\")\n ax.set_ylabel(r\"Latitude ($^{\\circ}$)\")\n ax.set_xlim(360, 0)\n ax.set_ylim(-90, 90)\n\n latf_arr, longf_arr = [], []\n latb_arr, longb_arr = [], []\n\n # Calc latitudes + longitudes\n for (pos, fp_f), (pos, fp_b) in zip(trueFoot_f_arr, trueFoot_b_arr):\n latf, longf = cartesian2latlong(*fp_f)\n latf_arr.append(latf)\n longf_arr.append(longf + 180)\n latb, longb = cartesian2latlong(*fp_b)\n latb_arr.append(latb)\n longb_arr.append(longb + 180)\n\n linef, = ax.plot(longf_arr, latf_arr, '-', label = 'Forward')\n lineb, = ax.plot(longb_arr, latb_arr, '-', label = 'Backward')\n legend = ax.legend()\n\n def init():\n # Flush figures to begin\n linef.set_ydata([np.nan] * len(longf_arr))\n lineb.set_ydata([np.nan] * len(longb_arr))\n return linef, lineb,\n\n def animate(i):\n linef.set_xdata(longf_arr[:i])\n linef.set_ydata(latf_arr[:i])\n lineb.set_xdata(longb_arr[:i])\n lineb.set_ydata(latb_arr[:i])\n ax.legend()\n ax.set_title(f\"{moon}, t = {round(time[i]/(P), 2)}\" + r\" x $T_{rel}$\")\n return linef, lineb,\n\n ani = animation.FuncAnimation(fig, animate, frames = len(time), blit = True, init_func = init, interval=delay)\n return ani\n\ndef animate_all_moons():\n print(\"Animating Moons...\")\n for moon in all_moons:\n print(f\"...{moon}\")\n n_o = (moon == 'Triton')*2 + (moon != 'Triton')\n fpath = f'Finals/{moon}/Animation_{moon}_500_{n_o}.mp4'\n footpoints = get_final_moon_footpoints(moon)\n ani = 
animate_footpoints(moon, footpoints, 50)\n ani.save(filename = fpath, writer = 'ffmpeg')\n print(\"Done\")\n\n# animate_all_moons()\n\n######## COORDINATE TIMESERIES ########\n\ndef lat_long_timeseries(moon):\n \n trueFoot_f_arr, trueFoot_b_arr, time = get_final_moon_footpoints(moon)\n T_rel = max(abs(time))\n time /= T_rel\n\n latlongs_f = [cartesian2latlong(*foot) for (pos, foot) in trueFoot_f_arr]\n lat_f, long_f = map(np.asarray, zip(*latlongs_f))\n long_f += 180\n\n latlongs_b = [cartesian2latlong(*foot) for (pos, foot) in trueFoot_b_arr]\n lat_b, long_b = map(np.asarray, zip(*latlongs_b))\n long_b += 180\n\n fig, axs = plt.subplots(2, 1, sharex=True)\n axs[0].clear()\n axs[1].clear()\n axs[0].set_ylim(0, 360)\n axs[0].plot(time, long_f, label='Forwards')\n axs[0].plot(time, long_b, label = 'Backwards')\n axs[0].set_ylabel(r\"Longitude ($^{\\circ}$)\")\n \n axs[1].set_ylim(-90, 90)\n axs[1].plot(time, lat_f, label='Forwards')\n axs[1].plot(time, lat_b, label = 'Backwards')\n axs[1].set_ylabel(r\"Latitude ($^{\\circ}$)\")\n axs[1].set_xlabel(r\"Time ($T_{rel}$)\")\n axs[0].set_title(f\"{moon}\")\n\n axs[0].legend()\n axs[1].legend()\n \n plt.savefig(f\"Finals/{moon}/timeseries_{moon}_lat_long.png\")\n print(f\"{moon} Figure saved.\")\n\n[lat_long_timeseries(moon) for moon in all_moons]\n\ndef footpoint_velocities(moon):\n\n trueFoot_f_arr, trueFoot_b_arr, time = get_final_moon_footpoints(moon)\n T_rel = max(abs(time))\n time /= T_rel\n\n\n\n# fig, axs = plt.subplots(2, 1, sharex=True)\n# n = int(1.5*len(time)/5)\n# axs[0].clear()\n# axs[1].clear()\n# axs[0].plot(time[:n]/(0.2*T_m*n_o), latf_arr[:n], 'b-', label = 'Forwards')\n# axs[0].plot(time[:n]/(0.2*T_m*n_o), latb_arr[:n], 'r-', label = 'Backwards')\n# axs[0].set_ylabel(r\"Latitude ($^{\\circ}$)\")\n# axs[1].plot(time[:n]/(0.2*T_m*n_o), longf_arr[:n], 'b-', label = 'Forwards')\n# axs[1].plot(time[:n]/(0.2*T_m*n_o), longb_arr[:n], 'r-', label = 'Backwards')\n# axs[1].set_ylabel(r\"Longitude ($^{\\circ}$)\")\n# axs[1].set_xlabel(r\"Time /$T_{rel}$\")\n# axs[0].legend()\n# axs[1].legend()\n\n# # plt.show()\n\n######## LOWES SPECTRA & FIELD RATIO DECAY ###########################\n\n# r = np.linspace(1, 25, 1000)\n# ratio = r**-1\n# quad = r**-4\n# dip = r**-3\n\n# uranus_Lowes = np.array([(i+1)*sum(_g**2 for _g in g) for i, g in enumerate(g_U)]) + np.array([(i+1)*sum(_h**2 for _h in h) for i, h in enumerate(h_U)])\n# uranus_Lowes = uranus_Lowes[1:]\n# uranus_Lowes /= uranus_Lowes[0]\n\n# neptune_Lowes = np.array([(i+1)*sum(_g**2 for _g in g) for i, g in enumerate(g_N)]) + np.array([(i+1)*sum(_h**2 for _h in h) for i, h in enumerate(h_N)])\n# neptune_Lowes = neptune_Lowes[1:]\n# neptune_Lowes /= neptune_Lowes[0]\n\n# ratio_U = uranus_Lowes[1]/uranus_Lowes[0]\n# ratio_N = neptune_Lowes[1]/neptune_Lowes[0]\n\n\n\n\n# # plt.plot(range(len(uranus_Lowes)), uranus_Lowes)\n# # plt.plot(range(len(neptune_Lowes)), neptune_Lowes)\n\n# plt.plot(r, ratio_U*ratio, label = 'Uranus')\n# plt.plot(r, ratio_N*ratio, label = 'Neptune')\n# for moon in all_moons:\n# a, = moon_selector(moon, 'a')\n# plt.axvline(a, linestyle = '-.', color = 'k')\n# plt.text(a+0.5, 1, f'{moon}', rotation=90)\n# plt.legend()\n# plt.show()\n\n" }, { "alpha_fraction": 0.6030277609825134, "alphanum_fraction": 0.6269974708557129, "avg_line_length": 36.356021881103516, "blob_id": "36fb32ab6ab9e20402b500cc760b6c2b2bc82784", "content_id": "97f354564c072c6d50b1308fd9a5ce3b0a01e794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 7134, "license_type": "no_license", "max_line_length": 123, "num_lines": 191, "path": "/planet_testing.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "from all_funcs import *\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mat\nimport numpy as np\nfrom numpy.linalg import norm\nfrom tqdm import tqdm\nimport os.path\nfrom palettable.wesanderson import Aquatic2_5, Cavalcanti_5\n\n\nfig = plt.figure()\nax = plt.axes(projection = '3d')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\n\n\nphi_array = np.linspace(0, 2*np.pi, 80)\nfull_field_lines = []\nfor phi in phi_array:\n field_lines = multilines(phi, 12, 0, 2*np.pi, coeffs=uranus, ds=0.01, maxits=100000, plot=False)\n full_field_lines.append(field_lines)\n for field_line in field_lines:\n (x, y, z) = field_line\n ax.plot3D(x, y, z, color=Aquatic2_5.mpl_colors[0])\nfpath = 'Neptune_Fields/Uranus_quadrupole_field_80phi_12theta.npy'\nwith open(fpath, 'wb') as file:\n np.save(file, full_field_lines)\n# all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon'] \n# for i, moon in enumerate(all_moons):\n# orbital_points, T_arr = orbit(moon, 200, 1, relative=False)\n# x, y, z = spherical2cartesian(orbital_points) \n# ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[i], label = moon)\n# orbital_points, T_arr = orbit('Triton', 200, 1, relative=True)\n# x, y, z = spherical2cartesian(orbital_points) \n# ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[4], label = 'Triton')\n# plt.legend()\n# plt.show()\n\n###### Plotting range of footpoints for a single position on lat-long plot ######\n# phi = 0\n# moon = 'Titania'\n# footpoints, trueFoot = random_footpoints(100, moon, phi, True)\n# fpath = 'Titania_phi-0_n-100.npy'\n# fpathTrue = 'Titania_phi-0_true.npy'\n# with open(fpath, 'wb') as file:\n# np.save(file, footpoints)\n# with open(fpathTrue, 'wb') as file:\n# np.save(file, np.asarray(trueFoot))\n\n# with open(fpath, 'rb') as file:\n# footpoints = np.load(file, allow_pickle=True)\n# with open(fpathTrue, 'rb') as file:\n# trueFoot = np.load(file, allow_pickle=True)\n \n# x, y, z = map(list, zip(*footpoints))\n# lat, longt = cartesian2latlong(x, y, z)\n# trueLat, trueLongt = cartesian2latlong(*trueFoot)\n\n\ndef makeThisAPlottingFunc():\n \"\"\"\n Make this a general plotting func later.\n \"\"\"\n plt.plot(trueLongt, trueLat, 'ro', label = r\"Accepted $g_n^m,~h_n^m$\")\n plt.plot(longt, lat, 'x', label = r\"Random $g_n^m,~h_n^m$\")\n plt.annotate(f\"{moon}, phi = {phi}\", (0.7, 0.05), xycoords = 'axes fraction')\n plt.xlabel(r'Longitude ($^\\circ$)')\n plt.ylabel(r'Latidude ($^\\circ$)')\n plt.legend()\n plt.show()\n\n\n###### Histograms ######\ndef histograms_dep():\n \"\"\"\n Histograms of angular deviation due to uncertainty of harmonic coefficients.\n \"\"\"\n lat_devs = []\n longt_devs = []\n latitudes = []\n longitudes = []\n for fp in footpoints:\n x, y, z = fp\n latitude, longitude = cartesian2latlong(x, y, z)\n latitudes.append(latitude)\n longitudes.append(longitude)\n lat_devs.append(trueLat - latitude)\n longt_devs.append(trueLongt - longitude)\n\n # fig, ax1 = plt.subplots(3, 1, sharex = True)\n fig = plt.figure()\n title_ax = fig.add_subplot(111, frameon=False)\n # hide tick and tick label of the big axis\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n title_ax.set_ylabel(\"Frequency Density\")\n title_ax.set_xlabel(r\"Deviation from Accepted Footpoint ($^\\circ$)\")\n ax1 = 
fig.add_subplot(3,1,1)\n ax1.hist(lat_devs, bins='auto', color='b', edgecolor='k', label=\"Latitude\") #latitude deviations histogram\n ax1.axvline(mean_lat_dev, color='k', linestyle='dashed', linewidth=1, label = f\"Mean: {round(mean_lat_dev, 3)}\")\n ax1.legend()\n\n ax2 = fig.add_subplot(3,1,2)\n ax2.hist(longt_devs, bins='auto', color='c', edgecolor='k', label = \"Longitude\") #longitude deviations histogram\n ax2.axvline(mean_long_dev, color='k', linestyle='dashed', linewidth=1, label = f\"Mean: {round(mean_long_dev, 3)}\")\n ax2.legend()\n\n ax3 = fig.add_subplot(313)\n ax3.hist([ang*180/np.pi for ang in ang_dev], bins='auto', edgecolor='k', label = 'Absolute Angle')\n ax3.axvline(mean_ang_dev*180/np.pi, color = 'k', linestyle='dashed', label=f\"Mean: {round(mean_ang_dev*180/np.pi, 3)}\")\n ax3.legend()\n plt.show()\n\n# fig, ax2 = plt.subplots(2, 1)\n# ax2[0].hist(latitudes, bins='auto') #latitudes histogram\n# ax2[0].axvline(trueLat, color='k', linestyle='dashed', linewidth=1)\n\n# ax2[1].hist(longitudes, bins='auto') #longitudes histogram\n# ax2[1].axvline(trueLongt, color='k', linestyle='dashed', linewidth=1)\n\n\n\n############# ORBIT TESTING #############\n\n# set up 3d axes\n# ax = plt.axes(projection = '3d')\n# ax.set_xlabel('x')\n# ax.set_ylabel('y')\n# ax.set_zlabel('z')\n\n# with tqdm(total=50, desc=\"FOOTPOINTS\") as bar:\n# footpoints = []\n# for phi in np.linspace(0, 2*np.pi, 50):\n# start_pos = [17.188, (np.pi/2 - 0.00593), phi]\n# x, y, z = field_trace(start_pos, uranus, 0.005, 200000)\n# # point = (x[-1], y[-1], z[-1])\n# # footpoints.append(point)\n# ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[3])\n# bar.update()\n\n# print(len(footpoints))\n# x, y, z = map(list, zip(*footpoints))\n\ndef plot_orbits(moons_list, num, num_orbits, relative = False):\n \"\"\"\n Plots all orbital paths for moons in a 'moons_list', in sidereal or planet\n rest frame, on 3d axes. Also plots planet for scale.\n PARAMS\n -----------------------------------------------------------------------------\n moons_list - array or list; contains only elements of type str, which must\n be one of the Uranian or Neptunian moons.\n relative - bool; if false, orbits plotted in sidereal rest frame. 
Otherwise,\n plotted in planet rest frame.\n \"\"\"\n # initialise lists\n x_ptp_arr = []\n y_ptp_arr = []\n z_ptp_arr = []\n\n # plot each moon in list\n for i, moon in enumerate(moons_list):\n orbital_points, T_arr = orbit(moon, num, num_orbits, relative=relative) # retrieve orbital path\n x, y, z = spherical2cartesian(orbital_points) # convert to Cartesian\n ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[i], label = moon)\n # save peak-to-peak width of orbital path in each co-ord.\n x_ptp_arr.append(np.ptp(x))\n y_ptp_arr.append(np.ptp(y))\n z_ptp_arr.append(np.ptp(z))\n\n # plot planet.\n u, v = np.mgrid[0:2*np.pi:50j, 0:np.pi:25j]\n a = np.cos(u)*np.sin(v)\n b = np.sin(u)*np.sin(v)\n c = np.cos(v)\n # find maximum bound for each coordinate\n x_len = max(x_ptp_arr)\n y_len = max(y_ptp_arr)\n # maximum z-bound either set by orbit or by planet, must compare both\n z_len = max(max(z_ptp_arr), np.ptp(c))\n # set aspect ratio by largest path in each dimension -> no squished paths or planets\n ax.set_box_aspect((x_len, y_len, z_len))\n ax.plot_wireframe(a, b, c, color=Aquatic2_5.mpl_colors[0])\n plt.legend()\n\n plt.show()\n\n# Plotting the different planetary systems\n# uranus_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon']\n# plot_orbits(uranus_moons)\n# plot_orbits(uranus_moons, 200, 1, True)" }, { "alpha_fraction": 0.4369688332080841, "alphanum_fraction": 0.5092067718505859, "avg_line_length": 23.75438690185547, "blob_id": "951dbd542e28c97db21dd9e5dbb9e95b0a3a63a1", "content_id": "b4d4434d62b2ab86c1b1d143a34c6858e5284760", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1412, "license_type": "no_license", "max_line_length": 88, "num_lines": 57, "path": "/unit_speed_tests.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "# import field_calculator\n# import field_calculator_numba\n# import test_wrapper\n# import stepper\nfrom all_funcs import *\n# from dipole_testing import multilines\nimport numpy as np\nimport time\n\ng_D = np.array([[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])\nh_D = np.array([[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])\na_D = 1\n\ndipole = (a_D, g_D, h_D)\n\ndef B_timer(funcname, numits):\n\n co_ords = [[r, th, ph] for r, (th, ph) in \n zip(np.linspace(0.1, 100, numits), \n zip(np.linspace(0.1, np.pi-0.1, numits), \n np.linspace(0.1, np.pi-0.1, numits)))]\n\n if funcname == \"B\":\n func = all_funcs.B\n args = (dipole,)\n elif funcname == \"RK4\":\n func = all_funcs.RK4\n B_0 = field_calculator.B([1, 0.3, 0.], dipole)\n args = (B_0, 0.01, dipole)\n\n t = 0\n for p in co_ords:\n t_0 = time.time()\n func(p, *args)\n t += (time.time()-t_0)\n\n print(f\"{funcname} TIME: \", t/numits)\n\ndef wrapper_timer(numits):\n\n p_0 = [1, 0.1, 0]\n t = 0\n for _ in range(numits):\n t_0 = time.time()\n all_funcs.field_trace(p_0, dipole, 0.01, 100000)\n t += time.time()-t_0\n\n print(\"Wrapper TIME: \", t/numits)\n\n\n\n# functimer(multilines, (50,), 5)\n# # B_timer(\"RK4\", 100000)\n# # B_timer(\"B\", 100000)\n# # B_timer(\"B_numba\", 100000)\n\n# wrapper_timer(10)\n\n" }, { "alpha_fraction": 0.6058847308158875, "alphanum_fraction": 0.620496392250061, "avg_line_length": 40.99159622192383, "blob_id": "3af8fdb847a58dfb106cc4125069b62162496c7c", "content_id": "30198c6937d39ef2614524d88d2497e6a4d008d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4996, "license_type": 
"no_license", "max_line_length": 212, "num_lines": 119, "path": "/convergence_tests.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "from all_funcs import *\n\n\ndef convergence_test(moon, pos, n_min, n_max, step):\n n_array = np.arange(n_min, n_max+1, step)\n ang_dev_arr_f = []\n ang_dev_arr_b = []\n lat_dev_arr_f = []\n lat_dev_arr_b = []\n long_dev_arr_f = []\n long_dev_arr_b = []\n\n for n in n_array:\n footpoints_f, footpoints_b, trueFoot_f, trueFoot_b = random_footpoints(n, moon, pos, True)\n mean_ang_dev_f, mean_lat_dev_f, mean_long_dev_f, mean_ang_dev_b, mean_lat_dev_b, mean_long_dev_b = angular_deviation([(pos, footpoints_f)], [(pos, trueFoot_f)], [(pos, footpoints_b)], [(pos, trueFoot_b)])\n (pos, ang_dev_f) = mean_ang_dev_f[0]\n ang_dev_arr_f.append(ang_dev_f)\n (pos, ang_dev_b) = mean_ang_dev_b[0]\n ang_dev_arr_b.append(ang_dev_b)\n (pos, lat_dev_f) = mean_lat_dev_f[0]\n lat_dev_arr_f.append(lat_dev_f)\n (pos, lat_dev_b) = mean_lat_dev_b[0]\n lat_dev_arr_b.append(lat_dev_b)\n (pos, long_dev_f) = mean_long_dev_f[0]\n long_dev_arr_f.append(long_dev_f)\n (pos, long_dev_b) = mean_long_dev_b[0]\n long_dev_arr_b.append(long_dev_b)\n \n return n_array, ang_dev_arr_f, ang_dev_arr_b, lat_dev_arr_f, lat_dev_arr_b, long_dev_arr_f, long_dev_arr_b\n\nn_min = 20\nn_max = 250\nstep = 10\n# arrs = convergence_test('titania', [17.07, np.pi/2, np.pi], n_min, n_max, step)\n_fpaths = ['arange', 'angdevs_f', 'angdevs_b', 'latdev_f', 'latdev_b', 'longdev_f', 'longdev_b']\nfpaths = ['Titania/' + path + f'_{n_min}_{n_max}_{step}_phi_pi.npy' for path in _fpaths]\n# for arr, path in zip(list(arrs), fpaths):\n# with open(path, 'wb') as file:\n# np.save(file, arr)\n\nwith open(fpaths[0], 'rb') as file:\n n_array = np.load(file, allow_pickle=True)\nwith open(fpaths[1], 'rb') as file:\n angdevs_f = np.load(file, allow_pickle=True)\nwith open(fpaths[2], 'rb') as file:\n angdevs_b = np.load(file, allow_pickle=True)\nwith open(fpaths[3], 'rb') as file:\n latdev_f = np.load(file, allow_pickle=True)\nwith open(fpaths[4], 'rb') as file:\n latdev_b = np.load(file, allow_pickle=True)\nwith open(fpaths[5], 'rb') as file:\n longdev_f = np.load(file, allow_pickle=True)\nwith open(fpaths[6], 'rb') as file:\n longdev_b = np.load(file, allow_pickle=True)\n\n# n_array = arrs[0]\n# for i in range(1, len(arrs)):\n# plt.plot(n_array, arrs[i], label = _fpaths[i]) \n\ndef forward_backward_plots():\n # Make plot of forward and backward footpoint deviations\n # for ang. dev, lat, long. Comment/uncomment as necessary.\n fig, axs = plt.subplots(2, 1, sharex=True)\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n plt.xlabel(\"No. random fieldlines\")\n plt.ylabel(r\"Ang. 
deviation squared ($^{\\circ}$)\")\n # plt.ylabel(\"Angular deviation (rad)\")\n # axs[0].plot(n_array, angdevs_f, label = 'Forward')\n # plt.plot(n_array, angdevs_b, label = 'Backward')\n\n # GROUP BY LAT-LONG\n # axs[0].set_title(\"Latitude\")\n # axs[0].plot(n_array, [lat**2 for lat in latdev_f], label = \"Forward\")\n # axs[0].plot(n_array, [lat**2 for lat in latdev_b], label = \"Backward\")\n # axs[0].legend()\n # axs[1].set_title(\"Longitude\")\n # axs[1].plot(n_array, [long**2 for long in longdev_f], label = \"Forward\")\n # axs[1].plot(n_array, [long**2 for long in longdev_b], label = \"Backward\")\n # axs[1].legend()\n\n # GROUP BY FORWARD-BACKWARD\n axs[0].set_title(\"Field Into Planet\")\n axs[0].plot(n_array, [lat**2 for lat in latdev_f], label = \"Latitude\")\n axs[0].plot(n_array, [long**2 for long in longdev_f], label = \"Longitude\")\n axs[0].legend()\n axs[1].set_title(\"Field Out Of Planet\")\n axs[1].plot(n_array, [lat**2 for lat in latdev_b], label = \"Latitude\")\n axs[1].plot(n_array, [long**2 for long in longdev_b], label = \"Longitude\")\n axs[1].legend()\n # plt.ylabel(\"Longitudinal deviation (rad)\")\n # plt.plot(n_array, longdev_f, label = \"Forward\")\n # plt.plot(n_array, longdev_b, label = \"Backward\")\n plt.show()\n\ndef triple_angle_plots():\n # Make plot of ang, lat, long deviations for both forwards\n # and backwards separately.\n fig, axs = plt.subplots(2,1, sharex=True)\n fig.add_subplot(111, frameon=False)\n # hide tick and tick label of the big axis\n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\n plt.xlabel(\"Num. Random Fieldlines\")\n plt.ylabel(\"Mean deviation from non-random footpoint (rad)\")\n axs[0].plot(n_array, angdevs_f, label=\"Angle\")\n axs[0].plot(n_array, latdev_f, label = \"Latitude\")\n axs[0].plot(n_array, longdev_f, label = \"Longitude\")\n axs[0].set_title(\"Fieldlines into planet\")\n axs[0].legend()\n axs[1].plot(n_array, angdevs_b, label=\"Angle\")\n axs[1].plot(n_array, latdev_b, label = \"Latitude\")\n axs[1].plot(n_array, longdev_b, label = \"Longitude\")\n axs[1].set_title(\"Fieldlines out of planet\")\n axs[1].legend()\n\n plt.show()\n\n# triple_angle_plots()\nforward_backward_plots()" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.800000011920929, "avg_line_length": 46.5, "blob_id": "2f921b0f805f3105711b57a321bb487304197c2e", "content_id": "38194e75a4da600c7ed13feda5d9c9db49c49b06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 95, "license_type": "no_license", "max_line_length": 83, "num_lines": 2, "path": "/README.md", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "# PyFields\nMagnetic field line tracer for Python, using RK4 methods with a variable step size.\n" }, { "alpha_fraction": 0.49972519278526306, "alphanum_fraction": 0.5272047519683838, "avg_line_length": 39.31610107421875, "blob_id": "2f2a2ed482ab2dfd360be391a05df867db4afde0", "content_id": "df1af81e5271e1816dc2fad51294f8387285c0b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27293, "license_type": "no_license", "max_line_length": 172, "num_lines": 677, "path": "/all_funcs.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "\"\"\"\nPyFields all_funcs.py\n\n01.11.2020\n\nAll modules in PyFields combined into one file to improve execution time.\n\"\"\"\n\nimport numpy as np\nimport numba\nfrom tqdm import tqdm\nimport matplotlib.pyplot as 
plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pandas as pd\nimport warnings\nimport os.path\nfrom palettable.wesanderson import Aquatic2_5, Cavalcanti_5\nfrom numpy.linalg import norm\nimport matplotlib as mat\nimport os\n\n######################### GLOBAL DEFINITIONS #############################\n\n# Uranus Coefficients\ng_U = np.array([[0., 0., 0.], [0.11893, 0.11579, 0.], [-0.06030, -0.12587, 0.00196]])\nh_U = np.array([[0., 0., 0.], [0., -0.15648, 0.], [0., 0.06116, 0.04759]])\ng_U_err = np.array([[0., 0., 0.], [0.001, 0.003, 0.], [0.00550, 0.00610, 0.005]])\nh_U_err = np.array([[0., 0., 0.], [0., 0.0017, 0.], [0., 0.00360, 0.00810]])\na_U = 1\n\nuranus = (a_U, g_U, h_U)\nuranus_uncert = (a_U, g_U_err, h_U_err)\n\n# Neptune Coefficients\ng_N = np.array([[0., 0., 0.], [0.09732, 0.03220, 0.], [0.07448, 0.00664, 0.04499]])\nh_N = np.array([[0., 0., 0.], [0., -0.09889, 0.], [0., 0.11230, -0.00070]])\ng_N_err = np.array([[0., 0., 0.], [0.002, 0.0036, 0.], [0.0113, 0.0112, 0.0084]])\nh_N_err = np.array([[0., 0., 0.], [0., 0.0011, 0.], [0., 0.003, -0.0034]])\na_N = 1\n\nneptune = (a_N, g_N, h_N)\nneptune_uncert = (a_N, g_N_err, h_N_err)\n\n# Dipole coefficients\ng_D = np.array([[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])\nh_D = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])\na_D = 1\n\ndipole = (a_D, g_D, h_D)\n\n# Quadrupole coefficients\ng_Q = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.]])\nh_Q = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])\na_Q = 1\n\nquadrupole = (a_Q, g_Q, h_Q)\n\n########################## FIELD CALCULATOR ###############################\n# Field component functions\[email protected]\ndef _B_rad_(r, th, ph, a, g, h):\n \"\"\"\n Radial magnetic field component. Formula from Connerney (1993).\n \"\"\"\n lgd = [[1.,0.,0.,0.], \n [np.cos(th),np.sin(th),0.,0.], \n [(3/2)*((np.cos(th))**2-(1/3)),(3**0.5)*(np.cos(th))*(np.sin(th)),((3**0.5)/2)*(np.sin(th))**2,0.], \n [(5/2)*(np.cos(th))*((np.cos(th))**2 - (9/15)),((5*(3**0.5))/(2**1.5))*(np.sin(th))*((np.cos(th))**2 - (3/15)), \n ((15**0.5)/2)*(np.cos(th))*((np.sin(th))**2),((5**0.5)/(2**1.5))*((np.sin(th))**3)]]\n\n B_rad_result= .0\n for n in range(0,3):\n for m in range(0, n+1):\n B_rad_result += (n+1)*((a/r)**(n+1))*(g[n][m]*np.cos(m*ph) + h[n][m]*np.sin(m*ph))*lgd[n][m]\n return B_rad_result\n\[email protected]\ndef _B_theta_(r, th, ph, a, g, h):\n \"\"\"\n Latitudinal magnetic field component. Formula from Connerney (1993).\n \"\"\"\n lgd_prime = [[0., 0., 0.],\n [-np.sin(th), np.cos(th), 0.],\n [-(3/2)*np.sin(2*th),(3**0.5)*((np.cos(th))**2 - (np.sin(th))**2),((3**0.5)/2)*(np.sin(2*th))]]\n\n B_theta_result= .0\n for n in range(0,3):\n for m in range(0, n+1):\n B_theta_result += -(a/r)**(n+2)*(g[n][m]*np.cos(m*ph) + h[n][m]*np.sin(m*ph))*lgd_prime[n][m]\n return B_theta_result\n\[email protected]\ndef _B_phi_(r, th, ph, a, g, h):\n \"\"\"\n Longitudinal magnetic field component. 
Formula from Connerney (1993).\n \"\"\"\n lgd = [[1.,0.,0.,0.], \n [np.cos(th),np.sin(th),0.,0.], \n [(3/2)*((np.cos(th))**2-(1/3)),(3**0.5)*(np.cos(th))*(np.sin(th)),((3**0.5)/2)*(np.sin(th))**2,0.], \n [(5/2)*(np.cos(th))*((np.cos(th))**2 - (9/15)),((5*(3**0.5))/(2**1.5))*(np.sin(th))*((np.cos(th))**2 - (3/15)), \n ((15**0.5)/2)*(np.cos(th))*((np.sin(th))**2),((5**0.5)/(2**1.5))*((np.sin(th))**3)]]\n B_phi_result= .0\n for n in range(0,3):\n for m in range(0, n+1):\n B_phi_result += (1/(np.sin(th)))*m*(a/r)**(n+2)*(g[n][m]*np.sin(m*ph) - h[n][m]*np.cos(m*ph))*lgd[n][m]\n return B_phi_result\n\ndef B(p, field_coeffs):\n \"\"\"\n Finds magnetic field strength at given (t, th, ph) co-ords for a given set of harmonic expansion \n coefficients. Returns vector of components as a tuple.\n \"\"\"\n r, th, ph = p[0], p[1], p[2]\n args = (r, th, ph, *field_coeffs)\n out_array = np.array([_B_rad_(*args), _B_theta_(*args), _B_phi_(*args)])\n return out_array\n\n############################## STEPPER #################################\[email protected]\ndef B_mag(B, r, th):\n # r = p[0]\n # th = p[1]\n magnitude = np.sqrt(B[0]**2 + (r * B[1])**2 + (r * np.sin(th) * B[2])**2)\n # magnitude = B[0]\n return magnitude\n\ndef RK4(p_0, B_0, ds, field_coeffs, back = False):\n \"\"\"\n Given starting coordinates r, th, ph, performs an RK4 step of size ds to\n get to follow the field to a new postion vector.\n\n ## ALL ISNTANCES OF PLANET -> field_coeffs ##\n \"\"\"\n r, th, ph = p_0[0], p_0[1], p_0[2]\n # Field vector at starting point\n # take unit vector\n v_0 = B_0/B_mag(B_0, p_0[0], p_0[1])\n \n # First Euler step\n p_1 = p_0 + 0.5*ds*v_0\n B_1 = B(p_1, field_coeffs)\n v_1 = B_1/B_mag(B_1, p_1[0], p_1[1])\n\n # First correction step\n p_2 = p_0 + 0.5*ds*v_1\n B_2 = B(p_2, field_coeffs)\n v_2 = B_2/B_mag(B_2, p_2[0], p_2[1])\n\n # Second correction step\n p_3 = p_0 + ds*v_2\n B_3 = B(p_3, field_coeffs)\n v_3 = B_3/B_mag(B_3, p_3[0], p_3[1])\n\n if not back:\n p_next = p_0 + ds*(v_0 + 2*v_1 + 2*v_2 + v_3)/6\n else:\n p_next = p_0 - ds*(v_0 + 2*v_1 + 2*v_2 + v_3)/6\n\n B_next = B(p_next, field_coeffs)\n\n return p_next, B_next\n\n\n################################### WRAPPER ###################################\n\ndef field_trace(start_pos, field_coeffs, ds, max_iter, axes = \"Cartesian\", back = False):\n \"\"\"\n Function to trace a field line given a starting positon.\n\n PARAMS\n ---------------------------------------------------------------------------------------------------\n start_pos - list or np.array; starting position in spherical coordinates from which to \n trace the field.\n field_coeffs - tuple of lists or np.arrays (2D); 2D array of coefficients for the spherical \n harmonic expansion. Must be of form (a, g, h).\n ds - float; stepsize to trace the field with, taken as a constant for all steps \n currently with intention of adding variable step-size to the tracer.\n max_iter - int; maximum number of iterations.\n\n RETURNS\n ----------------------------------------------------------------------------------------------------\n p_arr - np.array (2D); array of (r, th, ph) coordinates which fall on the traced field\n line.\n B_arr - np.array (2D); array of field vectors for every point in p_arr.\n \"\"\"\n\n B_0 = B(start_pos, field_coeffs)\n p_0 = start_pos\n\n p_arr, B_arr = np.asarray([np.zeros(3) for _ in range(max_iter)]), np.asarray([np.zeros(3) for _ in range(max_iter)])\n p_arr[0] = p_0\n B_arr[0] = B_0\n\n it = 1\n while (p_0[0] >= 1.) 
and (it < max_iter):\n p_next, B_next = RK4(p_0, B_0, ds, field_coeffs, back)\n p_arr[it] = p_next\n B_arr[it] = B_next\n p_0, B_0 = p_next, B_next\n it += 1\n iter_flag = (it == max_iter)\n\n if (iter_flag):\n return None\n\n p_arr = np.asarray([p for p in p_arr if np.any(p)])[:-1]\n B_arr = np.asarray([b for b in B_arr if np.any(b)])[:-1]\n\n if ((len(p_arr) < 3) or iter_flag):\n return None\n else:\n if axes == \"Cartesian\":\n x, z, y = spherical2cartesian(p_arr)\n return x, y, z\n else:\n return p_arr, B_arr\n\ndef spherical2cartesian(p_arr):\n \"\"\"\n Converts (2d) list of spherical coordinates to 3 (1d) lists of cartesian coordinates for\n use in plotting functions.\n PARAMS\n ---------------------------------------------------------------------------------------\n p_arr - list or numpy array (2D); list of [r, theta, phi] coordinates.\n RETURNS\n ---------------------------------------------------------------------------------------\n x, y, z - lists (1D); list of x-, y-, and z-coordinates (respectively) in Cartesian axes\n corresponding to the same points in space defined by p_arr.\n \"\"\"\n x, z, y = map(list, zip(*[(r*np.sin(theta)*np.cos(phi), r*np.cos(theta), r*np.sin(theta)*np.sin(phi)) for r, theta, phi in zip(p_arr[:, 0], p_arr[:, 1], p_arr[:, 2])]))\n return x, y, z\n\ndef multilines(phi, num, th_min = 0, th_max = 2*np.pi, coeffs = dipole, ds = 0.01, maxits = 100000, plot = True):\n \"\"\"\n Plots 'num' (int) field lines for equally spaced theta values between th_min and th_max.\n Field lines calculated using field coefficients given by coeffs (tuple), stepsize ds (float),\n and terminating after maxits (int). Use plt.show() to display plot after calling.\n \"\"\"\n th_values = np.linspace(th_min, th_max, num, endpoint=False)\n field_lines = []\n with tqdm(total = len(th_values), desc=f\"THETA {round(th_min/np.pi, 2)}*pi TO {round(th_max/np.pi, 2)}*pi\") as bar:\n for th in th_values:\n if th==0 or th==np.pi or th==2*np.pi:\n pass\n else:\n field_line = field_trace([1., th, float(phi)], coeffs, ds, maxits)\n if field_line is not None:\n (x, y, z) = field_line\n if plot:\n if y[0] > y[-1]:\n colour = 'r'\n else:\n colour = 'b'\n plt.plot(x, y, color = colour)\n else:\n field_lines.append(field_line)\n bar.update()\n # field_lines = np.asarray(field_lines)\n return field_lines\n\ndef multiline_3D(num_th, phi_array, th_min, th_max, coeffs = dipole, ds = 0.01, maxits = 100000):\n fig=plt.figure()\n ax = plt.axes(projection = '3d')\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n for phi in phi_array:\n field_lines = multilines(phi, num_th, th_min=th_min, th_max=th_max, coeffs=coeffs, ds=ds, maxits=maxits, plot=False)\n for field_line in field_lines:\n (x, y, z) = field_line\n ax.plot3D(x, y, z, color=Aquatic2_5.mpl_colors[0])\n\n\n##################### ANALYTIC COMPARISONS #######################\n\ndef _analytic_field_point_(th_i, th, field = 'dipole'):\n \"\"\"\n Calculates (x,y) coordinate at th for a field line whith starting coordinate (t, th, ph) = (1, th, 0).\n Also returns rflag, which is True if r <= 1 and false otherwise. 
This is to terminate calculation.\n \"\"\"\n if field == 'dipole':\n def x(th_i, th):\n return ((np.sin(th)**3)/np.sin(th_i)**2)\n def y(th_i, th):\n return ((np.sin(th)**2 * np.cos(th))/np.sin(th_i)**2)\n else:\n def x(th_i, th):\n return (np.sin(th_i)**2 * np.cos(th_i))**(-0.5) * np.sqrt(np.sin(th)**2 * np.cos(th)) * np.sin(th)\n def y(th_i, th):\n return (np.sin(th_i)**2 * np.cos(th_i))**(-0.5) * np.sqrt(np.sin(th)**2 * np.cos(th)) * np.cos(th)\n \n x, y = x(th_i, th), y(th_i, th)\n rflag = (round((x**2 + y**2), 6) < 1) # Boolean flag - is radial coord < 1?\n # print(rflag)\n return x, y, rflag\n \ndef analytic_field_line(th_i, ds, field = 'dipole'):\n th_range = np.arange(th_i, 2*np.pi, step=ds)\n # th_i_range = np.array([th_i for _ in th_range])\n # x_y_coords = [(x, y) for x, y, rflag in [_analytic_field_point_(th_i, th) for th_i, th in zip(th_i_range, th_range)] if not rflag]\n x_y_coords = []\n j = 0\n rflag = False\n # breakpoint()\n while (not rflag) and (j < len(th_range)):\n x, y, rflag = _analytic_field_point_(th_i, th_range[j], field)\n x_y_coords.append((x, y))\n j += 1\n # breakpoint()\n return x_y_coords\n\ndef _analytic_field_plot(th_min, th_max, numlines, ds, field = 'dipole'):\n th_start = np.linspace(th_min, th_max, numlines, endpoint=False)\n for th_i in th_start:\n coords = analytic_field_line(th_i, ds, field)\n x_arr, y_arr = map(list, zip(*coords))\n plt.plot(x_arr, y_arr, '--', color = 'k')\n\n################# COORDINATE TRANSFORM ########################\n\ndef cartesian2latlong(x, y, z):\n \"\"\"\n Convert 3D Cartesian coordinates to latitude-longitudes for \n 2D projection plots.\n PARAMS\n -----------------------------------------------------------------------\n x, y, z - float; coordinates in planet-centred Cartesian\n system. Axis of planetary rotation aligned along z-axis.\n RETURNS\n -----------------------------------------------------------------------\n lat, long - float;\n \"\"\"\n # Convert lists to arrays for vectorisation.\n # Ignores floats and arrays.\n args = [x, y, z]\n for i, elem in enumerate(args):\n if isinstance(elem, list):\n args[i] = np.asarray(elem)\n \n [x, y, z] = args\n r = np.sqrt(x**2 + y**2 + z**2)\n lat = np.arcsin(z/r)*(180/(np.pi))\n longt = np.arctan2(y, x)*(180/(np.pi))\n\n return lat, longt\n\n##################### MOON SELECTOR ###############################\ndf = pd.read_csv('satellite_properties.csv')\ndf.set_index('Name', inplace=True)\n\ndef moon_selector(moon, *args):\n \"\"\"\n Returns desired parameters for a given moon.\n\n PARAMS\n ------------------------------------------------------------------\n Possible values of 'moon':'\n 'Miranda' - 'Ariel' - 'Umbriel' - 'Titania' - 'Oberon' - 'Triton'\n (not case sensitive.)\n *args:\n Passing no args returns full properties dictionary for that moon.\n Otherwise, *args are keys to return properties for the moon. 
Each\n argument should be a string.\n Valid args are:\n 'Parent' - 'inc' - 'R' - 'a' - 'T'\n (parent (inclination, (Radius, (scaled (orbital\n planet, str) radians) km) radius) time period)\n\n 'coeffs' - 'uncert'\n Spherical harmonic coefficients for parent planet / associated \n uncertainties.\n (tuple of form (a, g, h).)\n 'parent_day'\n length of a day on the parent planet in units of Earth days\n\n RETURNS\n -------------------------------------------------------------------\n out_dict - dict; of type {'arg' : arg_value, ...} for all 'arg' \n passed as arguments.\n\n Note - Invalid arguments do not raise an error but deploy a warning.\n \"\"\"\n if not isinstance(moon, str):\n raise TypeError(\"Positional argument 'moon' must be of type string.\")\n \n # df = pd.read_csv('satellite_properties.csv')\n # Select coefficients to use \n # df.set_index('Name', inplace=True)\n moon = moon.lower()\n if moon not in df.index.values:\n raise ValueError(\"'moon' must be one of the 5 major Uranian moons or 'triton'.\")\n \n moon_dict = df.loc[moon].to_dict()\n parent = moon_dict['Parent']\n coeffs = (parent == 'Uranus')*uranus + (parent == 'Neptune')*neptune\n coeffs_uncert = (parent == 'Uranus')*uranus_uncert + (parent == 'Neptune')*neptune_uncert\n moon_dict['coeffs'] = coeffs\n moon_dict['uncert'] = coeffs_uncert\n\n if not len(args):\n return moon_dict\n \n out_tup = tuple([moon_dict[arg] for arg in args if arg in moon_dict.keys()])\n\n bad_args = [arg for arg in args if not (arg in moon_dict.keys())]\n\n if len(bad_args):\n warnings.warn(f'The following arguments are not in satellite_proprties and were not returned:\\n {bad_args}')\n\n return out_tup\n\n######################### TIMER #################################\n\nimport time\ndef functimer(func, args, n):\n \"\"\"\n Times a function n times, displays and returns the average time taken.\n 'args' are arguments to pass to the function being timed and should be \n a tuple to be unpacked.\n RETURNS\n ----------------------------------------------------------------------\n\n \"\"\"\n t = 0\n for _ in range(n):\n t_0 = time.time()\n func(*args)\n t += time.time() - t_0\n mean = t/n\n print(f\"{func.__name__} Time ({n} run avg):\\n{mean}\")\n return mean\n\n######################## COEFFICIENT UNCERTAINTIES ######################'''''\n\ndef random_footpoints(n, moon, pos, trueTrace = False):\n \"\"\"\n A function that generates random magnetic field footpoints within the bounds \n of the uncertainties of the magnetic field coefficients. New spherical harmonic \n expansion coefficients are calculated using a pseudorandom number generator, \n and n fieldlines are traced using these for a given moon-planet system at a \n fixed colatitude. 
The array of footpoints for all fieldlines is returned.\n\n PARAMS\n --------------------------------------------------------------------------------\n n - int; number of random fieldlines to calculate.\n moon - str; name of the moon to calculate footpoint uncertainties for.\n phi - float; value of colatitude at which to start the fieldline.\n trueTrace - bool; trace the fieldline using the accepted g, h coeffs.\n\n RETURNS\n ---------------------------------------------------------------------------------\n footpoints - list; list of tuples, where each tuple is (x, y, z) position of a \n footpoint of a fieldline calculated from the random coefficients.\n \"\"\"\n\n (R, coeffs, uncert) = moon_selector(moon, 'a', 'coeffs', 'uncert')\n # start_pos = [R, np.pi/2, phi]\n (a, g, h) = coeffs\n (a, g_err, h_err) = uncert\n \n # Trace the accepted fieldline if desired\n if trueTrace:\n # x, y, z = field_trace(start_pos, (a, g, h), 0.005, 200000)\n x, y, z = field_trace(pos, coeffs, 0.0075, 133000)\n trueFoot_f = (x[-1], y[-1], z[-1])\n x, y, z = field_trace(pos, coeffs, 0.0075, 133000, back=True)\n trueFoot_b = (x[-1], y[-1], z[-1])\n # initialise footpoints array\n footpoints_f = [0. for _ in range(n)]\n footpoints_b = [0. for _ in range(n)]\n # with tqdm(total=n, desc=f\"{moon}, phi={pos[2]}\") as bar:\n for k in range(n):\n g_new = np.zeros((3,3))\n h_new = np.zeros((3,3))\n\n for i in range(3):\n for j in range(3):\n # Ignore null coefficients\n if g[i][j] == 0.:\n pass\n else:\n # Generate random num between -1 and 1\n r_1 = (np.random.random()-0.5)*2\n # Use random num as multiplier on uncertainty, add\n # to coefficients\n g_new[i][j] = g[i][j] + g_err[i][j]*r_1\n # Repeat with different randnum for h coeffs\n r_2 = (np.random.random() - 0.5)*2\n h_new[i][j] = h[i][j] + h_err[i][j]*r_2\n \n coeffs = (a, g_new, h_new)\n # Trace fieldline with new set of coefficients\n x, y, z = field_trace(pos, coeffs, 0.005, 200000)\n # Take fieldline footpoint\n footpoints_f[k] = (x[-1], y[-1], z[-1])\n x, y, z = field_trace(pos, coeffs, 0.005, 200000, back=True)\n footpoints_b[k] = (x[-1], y[-1], z[-1])\n # bar.update()\n\n if trueTrace:\n return footpoints_f, footpoints_b, trueFoot_f, trueFoot_b\n else:\n return footpoints_f, footpoints_b\n\n######################## ORBIT CALCULATION ########################\n\ndef orbit(moon, num, num_orbits, period = 'relative', relative = False): #num_orbits is how many sidereal orbits #num gives num of points in one sidereal orbit\n \"\"\"\n Function to generate coordinates of an orbital path of a given satellite around its parent.\n Can calculate orbits in the sidereal rest frame or in the planet's rest frame.\n\n PARAMS\n -----------------------------------------------------------------------------------\n moon - str; name of one of the 5 Uranian moons, or Triton.\n num - int; number of time segments to plot per orbit, i.e - time resolution.\n num_orbits - float or int; number of orbits to trace. Only makes a difference for \n inclined orbits with relative = True.\n relative - bool; if false, orbit calculated is in sidereal rest frame, i.e - no\n consideration of planetary rotation. If true, then planetary rotation\n is calculated and orbit given is the path seen from a frame co-rotating \n with the parent planet.\n RETURNS\n ------------------------------------------------------------------------------------\n orbital_points - numpy array; array containing num + 1 points in spherical \n coordinates, determining the orbital path. 
Each point is a list\n length 3, [r, theta, phi].\n \"\"\"\n # Collect moon parameters\n (R, coeffs, period_moon, period_plan, incl) = moon_selector(moon, 'a', 'coeffs', 'T', 'parent_day', 'inc')\n incl = (np.pi/180) * incl # convert inclination to radians\n omega_moon = (2*np.pi)/period_moon # period -> frequency\n omega_plan = (2*np.pi)/period_plan\n\n if period.lower() == 'relative':\n p = period_moon*period_plan/abs(period_moon-period_plan)\n elif period.lower() == 'sidereal':\n p = period_moon\n else:\n raise Exception(\"Orbit: period arg must be 'relative' or 'sidereal'; relative by default.\")\n\n t_step = p/num \n n = int(num*num_orbits) # number of points to plot - int() covers non-whole num_orbits.\n\n orbital_points= [0 for i in range(n+1)] # initialise output list\n T_arr = [i*t_step for i in range(n+1)]\n\n for i, t in enumerate(T_arr):\n # angular argument of satellite in the plane of its orbit, more correctly called the 'argument of latitude'.\n phi_moon_orbit = omega_moon * t \n # from Adam's eqns:\n theta = np.arccos(np.cos(phi_moon_orbit)*np.sin(np.pi-incl)) \n phi_moon_eq = np.arctan2(-1*np.sin(phi_moon_orbit), np.cos(phi_moon_orbit)*np.cos(np.pi - incl))\n # phi_moon_eq is latitude coordinate in equatorial plane.\n if phi_moon_eq < 0:\n # handles negative arctan2 output\n phi_moon_eq += 2*np.pi\n if relative:\n # changes to planet rest frame\n phi = phi_moon_eq - omega_plan * t\n else:\n phi = phi_moon_eq \n # append point to list\n pos = [R, theta, phi]\n orbital_points[i] = pos\n return np.array(orbital_points), np.array(T_arr)\n\n###### Calculating mean angular error ######\n\ndef angular_deviation(footpoints_f_arr, trueFoot_f_arr, footpoints_b_arr, trueFoot_b_arr):\n\n mean_ang_dev_f = []\n mean_lat_dev_f = []\n mean_long_dev_f = []\n mean_ang_dev_b = []\n mean_lat_dev_b = []\n mean_long_dev_b = []\n\n for i, (pos, trueFoot) in enumerate(trueFoot_f_arr):\n trueLat, trueLongt = cartesian2latlong(*trueFoot)\n ang_dev_f = []\n lat_dev_f = []\n long_dev_f = []\n (pos, fp_arr) = footpoints_f_arr[i]\n for fp in fp_arr:\n # breakpoint()\n lat, longt = cartesian2latlong(*fp)\n ang_dev_f.append(np.arccos(np.dot(fp, trueFoot)/(norm(fp)*norm(trueFoot))))\n lat_dev_f.append(trueLat - lat)\n long_dev_f.append(trueLongt - longt)\n mean_ang_dev_f.append((pos, np.mean(ang_dev_f)))\n mean_lat_dev_f.append((pos, np.mean(lat_dev_f)))\n mean_long_dev_f.append((pos, np.mean(long_dev_f)))\n\n for i, (pos, trueFoot) in enumerate(trueFoot_b_arr):\n trueLat, trueLongt = cartesian2latlong(*trueFoot)\n ang_dev_b = []\n lat_dev_b = []\n long_dev_b = []\n (pos, fp_arr) = footpoints_b_arr[i]\n for fp in fp_arr:\n lat, longt = cartesian2latlong(*fp)\n ang_dev_b.append(np.arccos(np.dot(fp, trueFoot)/(norm(fp)*norm(trueFoot))))\n lat_dev_b.append(trueLat - lat)\n long_dev_b.append(trueLongt - longt)\n mean_ang_dev_b.append((pos, np.mean(ang_dev_b)))\n mean_lat_dev_b.append((pos, np.mean(lat_dev_b)))\n mean_long_dev_b.append((pos, np.mean(long_dev_b)))\n \n return mean_ang_dev_f, mean_lat_dev_f, mean_long_dev_f, mean_ang_dev_b, mean_lat_dev_b, mean_long_dev_b\n\n################## TRACING & SAVING ############################\n\ndef trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines):\n \n orbital_points_arr, T_arr = orbit(moon, num_orbit_points, num_orbits, relative = True)\n l = len(orbital_points_arr)\n \n footpoints_f_arr = [0 for i in range(l)]\n footpoints_b_arr = [0 for i in range(l)]\n trueFoot_f_arr = [0 for i in range(l)]\n trueFoot_b_arr = [0 for i in range(l)]\n\n n = 
len(orbital_points_arr)\n # with tqdm(total=n, desc=f\"{moon}\") as bar:\n for i, pos in enumerate(orbital_points_arr):\n footpoints_f, footpoints_b, trueFoot_f, trueFoot_b = random_footpoints(num_fieldlines, moon, pos, trueTrace = True)\n footpoints_f_arr[i] = (pos, footpoints_f)\n footpoints_b_arr[i] = (pos, footpoints_b)\n trueFoot_f_arr[i] = (pos, trueFoot_f)\n trueFoot_b_arr[i] = (pos, trueFoot_b)\n # bar.update()\n\n return footpoints_f_arr, footpoints_b_arr, trueFoot_f_arr, trueFoot_b_arr, T_arr\n\ndef save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines):\n if num_fieldlines != 0:\n paths = ['footpoints_f', 'footpoints_b', 'trueFoot_f', 'trueFoot_b', 'time']\n all_footpoints = trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines)\n else:\n paths = ['trueFoot_f', 'trueFoot_b', 'time']\n fp_f, fp_b, *all_footpoints = trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines)\n # footpoints_f_arr, footpoints_b_arr, trueFoot_f_arr, trueFoot_b_arr = trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines)\n cdir = os.getcwd()\n dest = os.path.join(cdir, 'Finals', moon)\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n all_footpoints = list(all_footpoints)\n \n for path, footpoint in zip(paths, all_footpoints):\n fpath = f\"Finals/{moon}/{path}_{num_orbit_points}_{num_orbits}.npy\"\n with open(fpath, 'wb') as file:\n np.save(file, footpoint)\n\ndef trace_and_save(moon, num_orbit_points, num_orbits, num_fieldlines, t_s = None, t_f = None):\n\n if (t_s is not None) and (t_f is not None):\n # Use trace partial orbit\n pass\n else:\n # Generate arrays of orbit coordinates and time series \n orbital_points, time = orbit(moon, num_orbit_points, num_orbits, relative = True)\n \n cdir = os.getcwd()\n dest = os.path.join(cdir, 'Finals', moon)\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n paths = ['footpoints_f', 'footpoints_b', 'trueFoot_f', 'trueFoot_b']\n\n for i, pos in enumerate(orbital_points):\n *lists, = random_footpoints(num_fieldlines, moon, pos, trueTrace = True)\n\n for path, list in zip(paths, lists):\n fpath = f\"Finals/{moon}/{path}_{num_orbit_points}_{num_orbits}_{num_fieldlines}_{i}.npy\"\n with open(fpath, mode='wb') as file:\n np.save(file, list)" }, { "alpha_fraction": 0.5720995664596558, "alphanum_fraction": 0.5919837355613708, "avg_line_length": 34.016483306884766, "blob_id": "821035e2e7309c9e55e8351c92e8ed6bfc4bf157", "content_id": "6c5354ed46e7053096374c20b554769688ed9c3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6387, "license_type": "no_license", "max_line_length": 148, "num_lines": 182, "path": "/dipole_testing.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "\"\"\"\nPyFields dipole_testing.py\n\n28.10.2020\n\nTesting model using a dipole\n\"\"\"\n\nfrom all_funcs import *\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mat\nimport numpy as np\nfrom tqdm import tqdm\nimport os.path\n\n########################## DIPOLE TEST ###############################\n\n# multiline_plot(50)\n# params = {\n# 'axes.labelsize': 14,\n# 'font.size': 14,\n# 'legend.fontsize': 14,\n# 'xtick.labelsize': 12,\n# 'ytick.labelsize': 12,\n# 'figure.figsize': [8,6]\n# }\n# plt.rcParams.update(params)\n# plt.legend((mat.lines.Line2D([0,0], [1,1], color = 'r'),), ('Traced Dipole, ds = 0.01',))\n# plt.xlabel(\"Distance in x\")\n# plt.ylabel(\"Distance in y\")\n# 
plt.show()\n\n\n########################## ERROR CALCULATION ##########################\n\ndef dipole_error(num, th_min, th_max, ds, max_iter):\n th_values = np.linspace(th_min, th_max, num, endpoint=False)\n th_returns = []\n for th in th_values:\n if (th < 0.0):\n th_return = -np.pi + abs(th)\n th_returns.append(th_return)\n else:\n th_return = np.pi - th\n th_returns.append(th_return)\n \n th_returns = np.array(th_returns)\n\n deltas = []\n lengths = []\n th_finals = []\n with tqdm(total=len(th_values), desc=\"Tracing Fields...\") as bar:\n for i, th in enumerate(th_values):\n start_pos = [1., th, 0.]\n field = field_trace(start_pos, dipole, ds, max_iter, axes=None)\n if field is not None:\n (p_arr, B_arr) = field\n th_final = p_arr[-1][1]\n th_finals.append(th_final)\n lengths.append(len(p_arr))\n deltas.append(abs(th_final-th_returns[i]))\n else:\n th_values[i] = np.nan\n th_finals.append(np.nan)\n lengths.append(np.nan)\n deltas.append(np.nan)\n bar.update()\n \n deltas = np.array(deltas)\n lengths = np.array(lengths)\n\n return th_values, deltas, lengths\n\ndef multi_step_size(num, th_min, th_max, stepsizes):\n for ds in stepsizes:\n field_lines = multiline_plot(num, th_min, th_max, ds=ds, maxits= int(1e4/ds), plot=False)\n th_values, deltas, lengths = dipole_error(num, th_min, th_max, ds, int(1e4/ds))\n fpath_field = f'/Testing/Dipole/Fieldlines/Dipole_fieldlines_ds_{ds}.npy'\n fpath_errors = f'/Testing/Dipole/Errors/Dipole_errors_ds_{ds}.npy'\n with open(fpath_field, 'wb') as file:\n np.save(file, field_lines)\n with open(fpath_errors, 'wb') as file:\n np.save(file, [th_values, deltas, lengths])\n\n\n\n# fpath = 'dipole_errors_0.01.npy'\n\n\"\"\"\nBelow is how you save and load numpy arrays. If you're reading this, then you don't have\nto run the dipole_error function again! The file will have pulled into your local respository\nso you can just run this file and it will access the data straight away. :)\n\"\"\"\n### RUN THIS BLOCK TO GENERATE DATA AND SAVE IT ###\n### DO THIS ONCE THEN ACCESS SAVED FILE TO SAVE TIME ###\n# th_values, th_returns, deltas, lengths = dipole_error(50, -np.pi/2, np.pi/2, 0.01, 100000)\n# with open(fpath, 'wb') as f:\n# np.save(f, [th_values, th_returns, deltas, lengths])\n\n### RUN THIS BLOCK TO RETRIEVE SAVED DATA ###\n# with open(fpath, 'rb') as f:\n# th_deltas = np.load(f, allow_pickle=True)\n# th_values, th_returns, deltas, lengths = th_deltas\n\n\n#################### PLOTTING #######################\n\nparams = {\n 'axes.labelsize': 14,\n 'font.size': 14,\n 'legend.fontsize': 14,\n 'xtick.labelsize': 12,\n 'ytick.labelsize': 12,\n 'figure.figsize': [8,6]\n }\nplt.rcParams.update(params)\n\n#l = int(len(th_values)/2)\n\n\"\"\"Adjacent plot of angular error (scaled by angular separation) and field line length vs starting theta value, with mean. Only plotted one half \"\"\"\n# fig, ax = plt.subplots(2,1, sharex=True)\n# ax[0].plot(th_values[l:], deltas[l:]/th_gap, label=\"Step Size = 0.01\")\n# ax[0].plot(th_values[l:], [mean_gap for _ in th_values[l:]], label=\"Mean\")\n# ax[0].set_ylabel(r\"(Angular Discrepancy)/$\\Delta\\theta$\", fontsize = 'medium', labelpad = 17)\n# ax[0].legend()\n# ax[1].plot(th_values[l:], lengths[l:], label = \"Step Size = 0.01\")\n# ax[1].set_ylabel(\"Fieldline Length (no. points)\", fontsize='medium')\n# ax[1].set_xlabel(r\"$\\theta$ (rad)\", fontsize = 'medium')\n# plt.legend()\n# plt.rcParams.update(params)\n# plt.show()\n\n\"\"\"Plot of angular error vs starting theta value, with mean. 
Only plotted one half \"\"\"\n# plt.plot(th_values[l:], deltas[l:], label=\"Step Size = 0.01\")\n# plt.plot(th_values[l:], [mean for _ in th_values[l:]], label=\"Mean\")\n# plt.ylabel(\"Angular Discrepancy\", fontsize = 'medium')\n# plt.xlabel(r\"$\\theta$ (rad)\", fontsize = 'medium')\n# plt.legend()\n# plt.rcParams.update(params)\n# plt.show()\n\n\"\"\"Plot of angular error (scaled by the angular separation) vs starting theta value, with mean. Only plotted one half \"\"\"\n# plt.plot(th_values[l:], deltas[l:]/th_gap, label=\"Step Size = 0.01\")\n# plt.plot(th_values[l:], [mean_gap for _ in th_values[l:]], label=\"Mean\")\n# plt.ylabel(r\"(Angular Discrepancy)/$\\Delta\\theta$\", fontsize = 'medium')\n# plt.xlabel(r\"$\\theta$ (rad)\", fontsize = 'medium')\n# plt.legend()\n# plt.rcParams.update(params)\n# plt.show()\n\n\"\"\"Plot of angular error (scaled by the angular separation) vs field line length (i.e. how many steps taken). Only plotted one half \"\"\"\n# plt.rcParams.update(params)\n# plt.plot(lengths[l:], deltas[l:]/th_gap, label = \"Step Size = 0.01\")\n# plt.xscale('log')\n# plt.xlabel(\"Log(Fieldline Length) [num. points]\")\n# plt.ylabel(r\"(Angular Error)/$\\Delta\\theta$\")\n# plt.legend()\n# plt.show()\n\n\n#################### ANALYTICAL VS PLOTTED #######################\ndef analytic_dipole_plot(numlines):\n theta_start = np.linspace(0, np.pi/2, numlines)\n def y(th, th_i):\n return np.sin(th)**2 * np.cos(th) / np.sin(th_i)**2\n def x(th, th_i):\n return np.sin(th)**3 / np.sin(th_i)**2\n\n for th_i in theta_start:\n coords = [(x(th, th_i), y(th, th_i)) for th in np.linspace(th_i, np.pi - th_i, 200)]\n x_arr, y_arr = map(list, zip(*coords))\n plt.plot(x_arr, y_arr, '-.', color = 'k')\n\n\n# multiline_plot(25, th_max = np.pi/2)\n# ls=(0, (3, 10, 1, 10, 1, 10))\n\n\n#################### 3D DIPOLE PLOT #######################\nmultiline_3D(10, [0., np.pi/3, 2*np.pi/3])\nplt.show()\n\n \n\n\n\n\n" }, { "alpha_fraction": 0.5680926442146301, "alphanum_fraction": 0.6030263304710388, "avg_line_length": 48.119266510009766, "blob_id": "322b7cba585a9a8310bdcf3fbda6f90d338b43a8", "content_id": "a502d96ba3944c0879c5582df520720885e5636d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5353, "license_type": "no_license", "max_line_length": 409, "num_lines": 109, "path": "/satellite_footpoints.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "from all_funcs import *\nfrom all_funcs import _B_rad_\n\n\ndef trace_partial_orbit(moon, orbit_points, num_fieldlines):\n \n l = len(orbit_points)\n \n footpoints_f_arr = [0 for i in range(l)]\n footpoints_b_arr = [0 for i in range(l)]\n trueFoot_f_arr = [0 for i in range(l)]\n trueFoot_b_arr = [0 for i in range(l)]\n\n for i, pos in enumerate(orbit_points):\n footpoints_f, footpoints_b, trueFoot_f, trueFoot_b = random_footpoints(num_fieldlines, moon, pos, trueTrace = True)\n footpoints_f_arr[i] = (pos, footpoints_f)\n footpoints_b_arr[i] = (pos, footpoints_b)\n trueFoot_f_arr[i] = (pos, trueFoot_f)\n trueFoot_b_arr[i] = (pos, trueFoot_b)\n\n return footpoints_f_arr, footpoints_b_arr, trueFoot_f_arr, trueFoot_b_arr\n\n\ndef save_partial_moon_trace(moon, orbit_points, t_s, t_f, num_fieldlines):\n\n paths = ['footpoints_f', 'footpoints_b', 'trueFoot_f', 'trueFoot_b', 'time']\n\n # footpoints_f_arr, footpoints_b_arr, trueFoot_f_arr, trueFoot_b_arr = trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines)\n all_footpoints = trace_partial_orbit(moon, 
orbit_points, num_fieldlines)\n all_footpoints = list(all_footpoints)\n\n for path, footpoint in zip(paths, all_footpoints):\n fpath = f'{moon}/' + path + f'_partial_{t_s}_{t_f}_{len(orbit_points)}_{num_fieldlines}.npy'\n with open(fpath, 'wb') as file:\n np.save(file, footpoint)\n\ndef all_moon_footpoints():\n all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon'] \n ax = plt.subplot(1,1,1)\n for i, moon in enumerate(all_moons):\n if moon == 'Miranda':\n orb = 1.05\n else:\n orb = 1\n\n with open(f'{moon}/trueFoot_f_40_{orb}_100.npy', 'rb') as file:\n trueFoot_f_arr = np.load(file, allow_pickle=True)\n with open(f'{moon}/trueFoot_b_40_{orb}_100.npy', 'rb') as file:\n trueFoot_b_arr = np.load(file, allow_pickle=True)\n # with open(f'{moon}/footpoints_f_40_{orb}_100.npy', 'rb') as file:\n # footpoints_f_arr = np.load(file, allow_pickle=True)\n # with open(f'{moon}/footpoints_b_40_{orb}_100.npy', 'rb') as file:\n # footpoints_b_arr = np.load(file, allow_pickle=True)\n # with open(f'{moon}/time_40_{orb}_100.npy', 'rb') as file:\n # T_arr = np.load(file, allow_pickle=True)\n for (pos, fp) in trueFoot_f_arr:\n lat, longt = cartesian2latlong(*fp)\n ax.plot(longt + 180, lat, 'x', color=Cavalcanti_5.mpl_colors[i])\n for (pos, fp) in trueFoot_b_arr:\n lat, longt = cartesian2latlong(*fp)\n ax.plot(longt + 180, lat, 'x', color=Cavalcanti_5.mpl_colors[i])\n ax.set_xlabel(r\"Longitude ($^{\\circ}$)\")\n ax.set_ylabel(r\"Latitude ($^{\\circ}$)\")\n ax.set_xlim(360, 0)\n ax.set_ylim(-90, 90)\n ax.legend((mat.lines.Line2D([0,0], [1,1], color = Cavalcanti_5.mpl_colors[0]), mat.lines.Line2D([0,0], [1,1], color = Cavalcanti_5.mpl_colors[1]), mat.lines.Line2D([0,0], [1,1], color = Cavalcanti_5.mpl_colors[2]), mat.lines.Line2D([0,0], [1,1], color = Cavalcanti_5.mpl_colors[3]), mat.lines.Line2D([0,0], [1,1], color = Cavalcanti_5.mpl_colors[4])), ('Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon'))\n plt.show()\n\ndef time_filter(t_s, t_f, time_array, orbit_points_array):\n _orb_dict = {t : pos for t, pos in zip(time_array, orbit_points_array)}\n orb_dict = dict(filter(lambda elem: (elem[0] > t_s) and (elem[0] < t_f), _orb_dict.items()))\n # print(list(orb_dict.items())[0], list(orb_dict.items())[-1])\n new_orbit_points_array = list(orb_dict.values())\n new_time_array = list(orb_dict.keys())\n return new_time_array, new_orbit_points_array\n\ndef surface_radial_field(planet):\n\n coeffs = (planet == 'Uranus')*uranus + (planet == 'Neptune')*neptune\n # rv, thv, phv = np.meshgrid(np.ones(500), np.linspace(0, np.pi, 500), np.linspace(0, 2*np.pi, 1000))\n # points = [(r, th, ph) for r, (th, ph) in zip(np.ones(10), zip(np.linspace(0, np.pi, 10), np.linspace(0, 2*np.pi, 10)))]\n points = [[(th, ph) for ph in np.linspace(0, 2*np.pi, 500)] for th in np.linspace(0, np.pi, 500)]\n # B_r_mat = _B_rad_(rv, thv, phv, *coeffs)\n B_r_mat = [[_B_rad_(1, *p, *coeffs) for p in row] for row in points]\n plt.imshow(B_r_mat, cmap='magma', extent = [360, 0, -90, 90])\n plt.colorbar()\n plt.show()\n\n# surface_radial_field('Uranus')\n\ndef surface_radial_field_contour(planet):\n\n coeffs = (planet == 'Uranus')*uranus + (planet == 'Neptune')*neptune\n # rv, thv, phv = np.meshgrid(np.ones(500), np.linspace(0, np.pi, 500), np.linspace(0, 2*np.pi, 1000))\n # points = [(r, th, ph) for r, (th, ph) in zip(np.ones(10), zip(np.linspace(0, np.pi, 10), np.linspace(0, 2*np.pi, 10)))]\n points = [[(th, ph) for ph in np.linspace(0, 2*np.pi, 500)] for th in np.linspace(0, np.pi, 500)]\n # B_r_mat = _B_rad_(rv, thv, phv, *coeffs)\n B_r_mat 
= np.asarray([np.asarray([_B_rad_(1, *p, *coeffs) for p in row]) for row in points])\n print(np.amin(B_r_mat))\n B_r_mat += abs(np.amin(B_r_mat))\n print(np.amin(B_r_mat))\n fig, ax = plt.subplots()\n cs = ax.contourf(np.linspace(360, 0, 500), np.linspace(-90, 90, 500), B_r_mat)\n ax.invert_xaxis()\n # plt.imshow(B_r_mat, cmap='magma', extent = [360, 0, -90, 90])\n plt.colorbar(cs)\n plt.show()\n\n# surface_radial_field('Neptune')" }, { "alpha_fraction": 0.5490347743034363, "alphanum_fraction": 0.599227786064148, "avg_line_length": 28.43181800842285, "blob_id": "44f969961fc0ab0e93126ce83c2cc5b61bc64ae8", "content_id": "0aecea5372507d6eaab454d67e9a440084ebb2d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1295, "license_type": "no_license", "max_line_length": 171, "num_lines": 44, "path": "/quadrupole_testing.py", "repo_name": "jjwindow/PyFields", "src_encoding": "UTF-8", "text": "from all_funcs import *\nimport matplotlib.pyplot as plt\nimport matplotlib as mat\nimport numpy as np\nfrom collections import Counter\n\n\n########################## QUADRUPOLE TEST ###############################\n\nparams = {\n 'axes.labelsize': 14,\n 'font.size': 14,\n 'legend.fontsize': 14,\n 'xtick.labelsize': 12,\n 'ytick.labelsize': 12,\n 'figure.figsize': [8,6]\n }\nplt.rcParams.update(params)\nmultiline_plot(50, coeffs=quadrupole)\nplt.xlabel(\"Distance in x\")\nplt.ylabel(\"Distance in y\")\n#plt.annotate(\"Traced Quadropole, ds=0.01\", xy=(1,1))\nplt.legend((mat.lines.Line2D([0,0], [1,1], color = 'r'),mat.lines.Line2D([0,0], [1,1], color = 'b')), ('Southbound Line','Northbound Line'))\nplt.show()\n\n\n########################## ANALYTICAL FIELD COMPARISON ##########################\n\nparams = {\n 'axes.labelsize': 14,\n 'font.size': 14,\n 'legend.fontsize': 14,\n 'xtick.labelsize': 12,\n 'ytick.labelsize': 12,\n 'figure.figsize': [8,6]\n }\nplt.rcParams.update(params)\n\nfield = 'Quadrupole'\n\nmultiline_plot(25, th_max = np.pi/2, coeffs = quadrupole)\n_analytic_field_plot(0, np.pi/2, 25, 0.001, 'quad')\nplt.legend((mat.lines.Line2D([0,0], [1,1], color = 'r'),mat.lines.Line2D([0,0], [1,1], color = 'k', linestyle='--')), (f'Traced {field}, ds = 0.01',f'Analytical {field}'))\nplt.show()\n" } ]
10
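The `analytic_dipole_plot` helper in the PyFields record above parametrises each dipole field line by its footpoint colatitude theta_i, via x(theta) = sin^3(theta)/sin^2(theta_i) and y(theta) = sin^2(theta)cos(theta)/sin^2(theta_i) — equivalently the L-shell relation r = sin^2(theta)/sin^2(theta_i) in units of the starting radius. Below is a minimal standalone sketch of that relation, assuming only NumPy; the function name and test values are illustrative, not part of the PyFields API:

```python
import numpy as np

def dipole_line(theta_i, n=201):
    """Analytic dipole field line anchored at colatitude theta_i on r = 1.

    Uses r = sin(theta)^2 / sin(theta_i)^2, the standard L-shell relation,
    so the line returns to r = 1 at the mirror colatitude pi - theta_i.
    """
    theta = np.linspace(theta_i, np.pi - theta_i, n)
    r = np.sin(theta) ** 2 / np.sin(theta_i) ** 2
    # 2-D projection used for plotting: x = r sin(theta), y = r cos(theta)
    return r * np.sin(theta), r * np.cos(theta)

x, y = dipole_line(np.pi / 6)
# The apex distance equals the L-shell value 1 / sin(theta_i)^2 = 4 here.
assert np.isclose(np.hypot(x, y).max(), 4.0)
```

The same overlay idea — traced lines against an analytic curve — is what `quadrupole_testing.py` does via `_analytic_field_plot`.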
bixdeng/CheckOut
https://github.com/bixdeng/CheckOut
f5fb8a4cf33d2d9b4f03240c3de9bf3be1dbb09f
37cec5d002ffa367145589c02a30272e71525ef6
6f805ad7e5709b560639ab54bf196c2ac62bf881
refs/heads/master
2021-01-22T10:02:03.237608
2015-03-30T03:14:55
2015-03-30T03:14:55
31332839
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6691511273384094, "alphanum_fraction": 0.6724637746810913, "avg_line_length": 40.629310607910156, "blob_id": "746f0fc0d1440ab030c14cbf0f9c033a5763dfa8", "content_id": "b36935092e42b4d43024e7db9075a07b1a84645e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 9660, "license_type": "no_license", "max_line_length": 154, "num_lines": 232, "path": "/app/src/main/java/com/group12/syde362/checkout/SingleProductDescrFragment.java", "repo_name": "bixdeng/CheckOut", "src_encoding": "UTF-8", "text": "package com.group12.syde362.checkout;\nimport android.app.Activity;\nimport android.net.Uri;\nimport android.os.Bundle;\n//import android.app.Fragment;\nimport android.support.v4.app.Fragment;\nimport android.support.v4.app.FragmentTransaction;\nimport android.view.LayoutInflater;\nimport android.view.animation.Animation;\nimport android.view.animation.AnimationUtils;\nimport android.widget.TextView;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport org.json.*;\nimport android.util.Log;\nimport android.widget.Toast;\nimport android.widget.Button;\nimport android.view.View.OnClickListener;\n\n/**\n * Created by terencekim on 15-03-18.\n */\npublic class SingleProductDescrFragment extends Fragment{\n // TODO: Rename parameter arguments, choose names that match\n // the fragment initialization parameters, e.g. ARG_ITEM_NUMBER\n private static final String ARG_NAME = \"param1\";\n private static final String ARG_WEIGHT = \"param2\";\n private static final String ARG_PRICE = \"param3\";\n private static final String ARG_QUANTITY = \"1\";\n\n\n\n // TODO: Rename and change types of parameters\n private String weight;\n private String name;\n private String price;\n private Integer quantity = 1;\n private Integer listPosition = 0;\n private Double totalPrice;\n\n private OnFragmentInteractionListener mListener;\n\n /**\n * Use this factory method to create a new instance of\n * this fragment using the provided parameters.\n *\n * @param result Parameter 1.\n * @param param2 Parameter 2.\n * @return A new instance of fragment SingleProductFragment.\n */\n // TODO: Rename and change types and number of parameters\n public static SingleProductDescrFragment newInstance(String name, String price, String weight, Integer quantity) {\n SingleProductDescrFragment fragment = new SingleProductDescrFragment();\n //storeNFCData(result);\n fragment.name = name;\n fragment.price = price;\n fragment.weight = weight;\n fragment.quantity = quantity;\n return fragment;\n\n }\n\n public SingleProductDescrFragment() {\n // Required empty public constructor\n }\n\n @Override\n public void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n calcTotalPrice(quantity, price);\n }\n\n @Override\n public View onCreateView(LayoutInflater inflater, ViewGroup container,\n final Bundle savedInstanceState) {\n // Inflate the layout for this fragment\n final View singleProductDescrView = inflater.inflate(R.layout.fragment_single_product_descr, container, false);\n\n TextView singleProductPrice = (TextView) singleProductDescrView.findViewById(R.id.singleProductPriceDescr);\n TextView singleProductWeight = (TextView) singleProductDescrView.findViewById(R.id.singleProductWeightDescr);\n TextView singleProductName = (TextView) singleProductDescrView.findViewById(R.id.singleProductNameDescr);\n final TextView updatingQuantity = (TextView) singleProductDescrView.findViewById(R.id.updatingQuantityDescr);\n final TextView 
totalProductPrice = (TextView) singleProductDescrView.findViewById(R.id.totalProductPriceDescr);\n\n\n final Button quantityMinus = (Button) singleProductDescrView.findViewById(R.id.minusDescr);\n final Button quantityPlus = (Button) singleProductDescrView.findViewById(R.id.plusDescr);\n final Animation buttonAnim = AnimationUtils.loadAnimation(getActivity(), R.anim.button_pressed);\n\n\n singleProductName.setText(name);\n singleProductPrice.setText(\"$\" + String.valueOf(price));\n singleProductWeight.setText(String.valueOf(weight) + \" kg\");\n ((TextView) singleProductDescrView.findViewById(R.id.updatingQuantityDescr)).setText(String.valueOf(quantity));\n totalProductPrice.setText(\"$\" + String.valueOf(String.format(\"%.2f\",totalPrice)));\n\n\n\n Button cancelButton = (Button) singleProductDescrView.findViewById(R.id.cancelButtonDescr);\n cancelButton.setOnClickListener(new OnClickListener() {\n public void onClick(View v) {\n //Toast.makeText(getActivity(), \"Cancel\", Toast.LENGTH_SHORT).show();\n //getActivity().getSupportFragmentManager().popBackStack();\n //above line actually removes the single item fragment, which is not what we want.\n // clicking on cancel and then\n android.support.v4.app.FragmentManager fm = getFragmentManager();\n FragmentTransaction ft = fm.beginTransaction();\n ProductFragment itemListFragment = ((MainActivity)getActivity()).getItemListFragment();\n ft.replace(R.id.container, itemListFragment, \"List\");\n ft.commit();\n }\n });\n\n Button updateButton = (Button) singleProductDescrView.findViewById(R.id.updateButton);\n updateButton.setOnClickListener(new OnClickListener() {\n public void onClick(View v) {\n //Toast.makeText(getActivity(), \"Add Item\", Toast.LENGTH_SHORT).show();\n\n ProductFragment itemListFragment = ((MainActivity)getActivity()).getItemListFragment();\n itemListFragment.subtractFromTotalWeight(quantity, weight); //subtracting old weight before adding new weight\n itemListFragment.subtractFromTotalPrice(quantity, price); //subtracting old price before adding new price\n\n Integer newQuantity = (Integer.parseInt((String) ((TextView) singleProductDescrView.findViewById(R.id.updatingQuantityDescr)).getText()));\n quantity = newQuantity;\n android.support.v4.app.FragmentManager fm = getFragmentManager();\n FragmentTransaction ft = fm.beginTransaction();\n updateItemInList();\n ft.replace(R.id.container, ((MainActivity)getActivity()).getItemListFragment(), \"List\");\n ft.commit();\n\n\n }\n });\n\n quantityMinus.setOnClickListener(new OnClickListener() {\n @Override\n public void onClick(View v) {\n Integer current = Integer.valueOf(String.valueOf(updatingQuantity.getText()));\n if (current <= 1){\n quantityMinus.startAnimation(buttonAnim);\n updatingQuantity.setText(String.valueOf(1));\n }\n else{\n quantityMinus.startAnimation(buttonAnim);\n Integer newQuantity = current - 1;\n updatingQuantity.setText(String.valueOf(newQuantity));\n totalProductPrice.setText(\"$\"+String.valueOf(String.format(\"%.2f\",calcTotalPrice(newQuantity, price))));\n\n }\n }\n });\n\n quantityPlus.setOnClickListener(new OnClickListener() {\n @Override\n public void onClick(View v) {\n quantityPlus.startAnimation(buttonAnim);\n Integer current = Integer.valueOf(String.valueOf(updatingQuantity.getText()));\n Integer newQuantity = current + 1;\n updatingQuantity.setText(String.valueOf(newQuantity));\n totalProductPrice.setText(\"$\"+String.valueOf(String.format(\"%.2f\",calcTotalPrice(newQuantity, price))));\n }\n });\n\n return singleProductDescrView;\n }\n\n // 
TODO: Rename method, update argument and hook method into UI event\n public void onButtonPressed(Uri uri) {\n if (mListener != null) {\n mListener.onFragmentInteraction(uri);\n }\n }\n\n @Override\n public void onAttach(Activity activity) {\n super.onAttach(activity);\n try {\n mListener = (OnFragmentInteractionListener) activity;\n } catch (ClassCastException e) {\n throw new ClassCastException(activity.toString()\n + \" must implement OnFragmentInteractionListener\");\n }\n }\n\n @Override\n public void onDetach() {\n super.onDetach();\n mListener = null;\n }\n\n\n /**\n * This interface must be implemented by activities that contain this\n * fragment to allow an interaction in this fragment to be communicated\n * to the activity and potentially other fragments contained in that\n * activity.\n * <p/>\n * See the Android Training lesson <a href=\n * \"http://developer.android.com/training/basics/fragments/communicating.html\"\n * >Communicating with Other Fragments</a> for more information.\n */\n public interface OnFragmentInteractionListener {\n // TODO: Update argument type and name\n public void onFragmentInteraction(Uri uri);\n }\n\n public void updateItemInList(){\n ProductListItem updatedListItem = new ProductListItem(name, weight, price, quantity);\n ((MainActivity)getActivity()).getItemListFragment().updateTotalPrice(updatedListItem, quantity);\n ((MainActivity)getActivity()).getItemListFragment().updateTotalWeight(updatedListItem, quantity);\n ((MainActivity)getActivity()).getItemListFragment().getProductList().set(listPosition, updatedListItem);\n }\n\n public Double calcTotalPrice(Integer quantity, String price){\n Double unitPrice = Double.valueOf(String.format(\"%.2f\",Double.valueOf(price)));\n Double newTotalPrice = unitPrice * quantity;\n totalPrice = newTotalPrice;\n return newTotalPrice;\n }\n\n\n public String getName(){\n return name;\n }\n public Integer getListPosition() {\n return listPosition;\n }\n public void setListPosition(Integer position){\n listPosition = position;\n }\n\n}\n\n\n" }, { "alpha_fraction": 0.5718085169792175, "alphanum_fraction": 0.5718085169792175, "avg_line_length": 15.30434799194336, "blob_id": "fd94c3c8339c052c4801e9f2c1b6d8737f94bfac", "content_id": "21a4e2ebd49f4369a078102a2ba3ec15c3863179", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 376, "license_type": "no_license", "max_line_length": 73, "num_lines": 23, "path": "/ino/sendweight.php", "repo_name": "bixdeng/CheckOut", "src_encoding": "UTF-8", "text": "<?php\nrequire_once __DIR__ . 
'/db_connect.php';\n\n$db = new DB_CONNECT();\n\nif(isset($_POST['weight'])) {\n\t$weight = $_POST['weight'];\n\n\t // mysql inserting a new row\n\t$query = mysql_query(\"INSERT INTO products(weight) VALUES ('$weight')\");\n \t\n \t$result = $query;\n \tif ($result) \n \t\techo \"hell yeah\";\n \telse\n \t\techo \"well fuck\";\n} else {\n\techo \"shit...\";\n}\n\n// echo $data; \n\n?>\n\n" }, { "alpha_fraction": 0.6401569247245789, "alphanum_fraction": 0.6733238101005554, "avg_line_length": 32.5930233001709, "blob_id": "03008ae379d060e82feccc2e66b16a9ac906061cdc83a96b4a9", "content_id": "ec5b5be6266c8abe16a9ac906061cdc83a96b4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2804, "license_type": "no_license", "max_line_length": 100, "num_lines": 86, "path": "/ino/WirelessWeight/WirelessWeight.ino", "repo_name": "bixdeng/CheckOut", "src_encoding": "UTF-8", "text": "#include <SoftwareSerial.h>\n#include <Stepper.h>\n#define RxD 6 //pin that the bluetooth (BT_TX) will transmit to the Arduino (RxD)\n#define TxD 7 //pin that the bluetooth (BT_RX) will receive from the Arduino (TxD)\n#define DEBUG_ENABLED 1\n#define RELAY 4\n\nSoftwareSerial blueToothSerial(RxD, TxD);\nconst int numReadings = 10;\n\nint analogPin = A0;\nfloat load;\n\nint sensorValue = 0;\n//int cValueHigh = 0;\n//int cValueLow = 1023;\nint initialValue = 0;\n\n// two-point calibration pairs: (analogValueA, loadA) and (analogValueB, loadB)\nfloat loadA = 2;\nfloat analogValueA = 6.5;\nfloat loadB = 4;\nfloat analogValueB = 5;\nfloat analogValueAverage = 0;\n\nvoid setup() {\n  Serial.begin(38400);\n  // ----SETUP FOR BLUETOOTH----\n  pinMode(RxD, INPUT);\n  pinMode(TxD, OUTPUT);\n  Serial.println(\"Initialize Slave BT...\"); \n  setupBlueToothConnection();\n  while(millis() < 20000) {\n    initialValue = analogRead(analogPin);\n  }\n  // ----END SETUP FOR BLUETOOTH---- \n  Serial.flush(); \n}\n\nvoid loop(){\n  char recvChar;\n  sensorValue = analogRead(analogPin);\n  \n//  analogValueAverage = 0.99*analogValueAverage + 0.01*sensorValue;\n  \n  if(blueToothSerial.available()){\n    recvChar = blueToothSerial.read(); \n    Serial.print(recvChar);\n    load = analogToLoad(sensorValue);\n//    load =((sensorValue - initialValue) / 7.6666);\n    blueToothSerial.println(load);\n    delay(5000);\n    switch(recvChar) {\n      case 'r': //Read load\n//        load =((sensorValue - initialValue) / 7.6666);\n        load = analogToLoad(sensorValue);\n        blueToothSerial.print(load, 2);\n        delay(1000);\n        break;\n    }\n  }  \n}  \n\nfloat analogToLoad(float analogval) {\n  float load = mapfloat(analogval - initialValue, analogValueA, analogValueB, loadA, loadB);\n  return load;\n}\n\nfloat mapfloat(float x, float in_min, float in_max, float out_min, float out_max) {\n  return (x - in_min)*(out_max - out_min) / (in_max - in_min) + out_min;\n}\n \n//The following code is necessary to setup the bluetooth shield ------copy and paste----------------\nvoid setupBlueToothConnection() {\n  blueToothSerial.begin(38400);// BluetoothBee BaudRate to default baud rate 38400\n  blueToothSerial.print(\"\\r\\n+STWMOD=0\\r\\n\"); //set the bluetooth work in slave mode\n  blueToothSerial.print(\"\\r\\n+STNA=Cart #25\\r\\n\"); //set the bluetooth name as \"Cart #25\"\n  blueToothSerial.print(\"\\r\\n+STOAUT=1\\r\\n\"); // Permit Paired device to connect me\n  blueToothSerial.print(\"\\r\\n+STAUTO=0\\r\\n\"); // Auto-connection should be forbidden here\n  blueToothSerial.print(\"\\r\\n+STPIN=1234\\r\\n\"); //set pin to 1234\n  blueToothSerial.print(\"\\r\\n+RTPIN=1234\\r\\n\");//ask to input pin\n  delay(2000); // This delay is required.\n  
blueToothSerial.print(\"\\r\\n+INQ=1\\r\\n\"); //make the slave bluetooth inquirable \n Serial.println(\"The slave bluetooth is inquirable!\");\n delay(2000); // This delay is required.\n blueToothSerial.flush(); \n}\n\n" }, { "alpha_fraction": 0.5552266836166382, "alphanum_fraction": 0.5638688206672668, "avg_line_length": 31.61389923095703, "blob_id": "2a6a48a6783756bf1863d9e1d1e3357ee6e79820", "content_id": "a592620e9af536a73357270f34f5ebce94049646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 8447, "license_type": "no_license", "max_line_length": 111, "num_lines": 259, "path": "/app/src/main/java/com/group12/syde362/checkout/BluetoothHelper.java", "repo_name": "bixdeng/CheckOut", "src_encoding": "UTF-8", "text": "package com.group12.syde362.checkout;\n\nimport android.bluetooth.BluetoothAdapter;\nimport android.bluetooth.BluetoothClass;\nimport android.bluetooth.BluetoothDevice;\nimport android.bluetooth.BluetoothSocket;\nimport android.os.Handler;\nimport android.os.Looper;\nimport android.os.Message;\nimport android.support.v4.app.Fragment;\nimport android.util.Log;\nimport android.widget.ArrayAdapter;\nimport android.widget.Button;\nimport android.widget.ListView;\nimport android.widget.Toast;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.util.ArrayList;\nimport java.util.UUID;\n\n/**\n * Created by Andy Seo on 3/19/2015.\n */\npublic abstract class BluetoothHelper extends Fragment {\n\n ListView listViewPaired;\n ListView listViewDetected;\n ArrayList<String> arrayListpaired;\n Button buttonSearch, buttonOn, buttonOff, buttonWeight;\n ArrayAdapter<String> adapter, detectedAdapter;\n // static HandleSearch handleSeacrh;\n static BluetoothDevice bdDevice;\n BluetoothClass bdClass;\n ArrayList<BluetoothDevice> arrayListPairedBluetoothDevices;\n //private ButtonClicked clicked;\n //ListItemClickedOnPaired listItemClickedOnPaired;\n BluetoothAdapter bluetoothAdapter = null;\n ArrayList<BluetoothDevice> arrayListBluetoothDevices = null;\n //ListItemClicked listItemClicked;\n\n /*\nhttps://github.com/aron-bordin/Android-with-Arduino-Bluetooth/blob/master/Android/Example/BluetoothArduino.java\n */\n OutputStream mOut;\n InputStream mIn;\n private String TAG = \"BluetoothConnector\";\n private BluetoothSocket mBlueSocket;\n boolean connected = false;\n BluetoothDevice mDevice;\n static ConnectThread mConnectThread;\n ConnectedThread mConnectedThread;\n\n // private OnFragmentInteractionListener mListener;\n\n // Message types used by the Handler\n public static final int MESSAGE_WRITE = 1;\n public static final int MESSAGE_READ = 2;\n String readMessage = \"\";\n String readMessage1 = \"\";\n String readMessage2 = \"\";\n SendReceiveBytes sendReceiveBT = null;\n static double measuredWeight;\n\n\n\n /*\n Connecting with Arduino via Bluetooth\n */\n public class ConnectThread extends Thread {\n private final BluetoothSocket mmSocket;\n private final BluetoothDevice mmDevice;\n final UUID MY_UUID = UUID.fromString(\"00001101-0000-1000-8000-00805f9b34fb\");\n\n public ConnectThread(BluetoothDevice device) {\n Log.i(\"ConnectThread\", \"Started\");\n BluetoothSocket tmp = null;\n mmDevice = device;\n try {\n tmp = device.createRfcommSocketToServiceRecord(MY_UUID);\n } catch (IOException e) {\n }\n mmSocket = tmp;\n }\n\n public BluetoothSocket getSocket(){\n Log.i(\"Socket\", \"\"+mmSocket);\n return mmSocket;\n }\n\n public void run() {\n bluetoothAdapter.cancelDiscovery();\n try 
{\n mmSocket.connect();\n } catch (IOException connectException) {\n try {\n mmSocket.close();\n } catch (IOException closeException) {\n }\n return;\n }\n// mConnectedThread = new ConnectedThread(mmSocket);\n// mConnectedThread.start();\n// Toast.makeText(getActivity(), \"Connected to \"+bdDevice.getName(), Toast.LENGTH_SHORT).show();\n Log.i(\"bluetooth\", \"connected!!\");\n }\n\n public void cancel() {\n try {\n mmSocket.close();\n } catch (IOException e) {\n }\n }\n }\n\n\n public class ConnectedThread extends Thread {\n private final BluetoothSocket mmSocket;\n private final InputStream mmInStream;\n private final OutputStream mmOutStream;\n\n public ConnectedThread(BluetoothSocket socket) {\n Log.i(\"ConnectedThread\", \"Started\");\n mmSocket = socket;\n InputStream tmpIn = null;\n OutputStream tmpOut = null;\n try {\n tmpIn = socket.getInputStream();\n tmpOut = socket.getOutputStream();\n } catch (IOException e) {\n }\n mmInStream = tmpIn;\n mmOutStream = tmpOut;\n }\n\n public void run() {\n sendReceiveBT = new SendReceiveBytes(mmSocket);\n new Thread(sendReceiveBT).start();\n String red = \"r\";\n byte[] myByte = stringToBytesUTFCustom(red);\n sendReceiveBT.write(myByte);\n }\n\n public void cancel() {\n try {\n mmSocket.close();\n } catch (IOException e) {\n }\n }\n }\n\n\n public static byte[] stringToBytesUTFCustom(String str) {\n char[] buffer = str.toCharArray();\n byte[] b = new byte[buffer.length << 1];\n for (int i = 0; i < buffer.length; i++) {\n int bpos = i << 1;\n b[bpos] = (byte) ((buffer[i] & 0xFF00) >> 8);\n b[bpos + 1] = (byte) (buffer[i] & 0x00FF);\n }\n return b;\n }\n\n\n private class SendReceiveBytes implements Runnable {\n private BluetoothSocket btSocket;\n private InputStream btInputStream = null;\n private OutputStream btOutputStream = null;\n String TAG = \"SendReceiveBytes\";\n\n public SendReceiveBytes(BluetoothSocket socket) {\n btSocket = socket;\n try {\n btInputStream = btSocket.getInputStream();\n btOutputStream = btSocket.getOutputStream();\n } catch (IOException streamError) {\n Log.e(TAG, \"Error when getting input or output Stream\");\n }\n }\n\n public void run() {\n byte[] buffer = new byte[1024]; // buffer store for the stream\n int bytes; // bytes returned from read()\n\n // Keep listening to the InputStream until an exception occurs\n while (true) {\n try {\n // Read from the InputStream\n bytes = btInputStream.read(buffer);\n Log.e(TAG, \"Failed to read\");\n // Send the obtained bytes to the UI activity\n mHandler.obtainMessage(MESSAGE_READ, bytes, -1, buffer)\n .sendToTarget();\n } catch (IOException e) {\n Log.e(TAG, \"Error reading from btInputStream\");\n e.printStackTrace();\n break;\n }\n }\n }\n\n public void write(byte[] bytes) {\n try {\n btOutputStream.write(bytes);\n }\n catch (IOException e) {\n Log.e(TAG, \"Error when writing to btOutputStream\");\n e.printStackTrace();\n }\n }\n\n\n\n// public void getWeight() {\n// mConnectedThread = new ConnectedThread(mConnectThread.getSocket());\n// mConnectedThread.start();\n// }\n\n\n\n /*\nhttp://stackoverflow.com/questions/23540754/send-data-from-arduino-to-android-app-via-bluetooth\n */\n // The Handler that gets information back from the Socket\n\n\n private final Handler mHandler;\n\n {\n mHandler = new Handler(Looper.getMainLooper()) {\n @Override\n public void handleMessage(Message msg) {\n switch (msg.what) {\n case MESSAGE_WRITE:\n //Do something when writing\n break;\n case MESSAGE_READ:\n //Get the bytes from the msg.obj\n byte[] readBuf = (byte[]) msg.obj;\n // construct 
a string from the valid bytes in the buffer\n                        readMessage1 = new String(readBuf, 0, msg.arg1);\n                        readMessage = readMessage + readMessage1;\n                        \n                        measuredWeight = Double.parseDouble(readMessage);\n                        Log.i(\"Weight String: \", \"\" + readMessage);\n                        Log.i(\"Weight Double: \", \"\" + measuredWeight);\n                        Toast.makeText(getActivity(), \"\"+readMessage, Toast.LENGTH_SHORT).show();\n                        break;\n                }\n            }\n        };\n    }\n    }\n\n\n\n\n}\n" }, { "alpha_fraction": 0.6849015355110168, "alphanum_fraction": 0.7352297306060791, "avg_line_length": 25.764705657958984, "blob_id": "929e6b20a9e4cff8f5e4361bf09df0c6a17fcd8b", "content_id": "7aa01a1fc1f12ebc7e806fad071e5739cd27e499", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 67, "num_lines": 17, "path": "/ino/main.py", "repo_name": "bixdeng/CheckOut", "src_encoding": "UTF-8", "text": "import serial\nimport sys\nimport urllib\nimport urllib2\n\nser = serial.Serial('/dev/tty.usbmodem1411', 9600)\nurl = 'http://192.168.43.196/android_connect/sendweight.php'\n\n# while True:\nvalue = ser.readline()\nprint value\nquery_args = { 'weight': value }\nencoded_args = urllib.urlencode(query_args)\nreq = urllib2.Request(url, encoded_args)\nreq.add_header(\"Content-type\", \"application/x-www-form-urlencoded\")\npage = urllib2.urlopen(req).read()\n# print page\nsys.exit(0)\n" }, { "alpha_fraction": 0.5794855356216431, "alphanum_fraction": 0.5829703211784363, "avg_line_length": 35.016483306884766, "blob_id": "2c182dbd21da9b7747cde66e63659ad707e6892c", "content_id": "2821c25d5c8d0e6c1a429a690179539374a5f950", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 21809, "license_type": "no_license", "max_line_length": 122, "num_lines": 601, "path": "/app/src/main/java/com/group12/syde362/checkout/SettingsFragment.java", "repo_name": "bixdeng/CheckOut", "src_encoding": "UTF-8", "text": "package com.group12.syde362.checkout;\n\nimport android.app.Activity;\nimport android.bluetooth.BluetoothAdapter;\nimport android.bluetooth.BluetoothClass;\nimport android.bluetooth.BluetoothDevice;\nimport android.bluetooth.BluetoothSocket;\nimport android.content.BroadcastReceiver;\nimport android.content.Context;\nimport android.content.Intent;\nimport android.content.IntentFilter;\nimport android.net.Uri;\nimport android.os.Bundle;\n//import android.app.Fragment;\nimport android.os.Message;\nimport android.support.v4.app.Fragment;\nimport android.support.v4.app.FragmentManager;\nimport android.util.Log;\nimport android.view.LayoutInflater;\nimport android.view.View;\nimport android.view.ViewGroup;\nimport android.widget.AdapterView;\nimport android.widget.ArrayAdapter;\nimport android.widget.Button;\nimport android.widget.ListView;\nimport android.widget.TextView;\nimport android.widget.Toast;\n\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.OutputStream;\nimport java.lang.reflect.Method;\nimport java.util.ArrayList;\nimport java.util.Set;\nimport java.util.UUID;\nimport android.os.Handler;\nimport java.util.logging.LogRecord;\n\n\n/**\n * A simple {@link Fragment} subclass.\n * Activities that contain this fragment must implement the\n * {@link SettingsFragment.OnFragmentInteractionListener} interface\n * to handle interaction events.\n * Use the {@link SettingsFragment#newInstance} factory method to\n * create an instance of this fragment.\n */\npublic class SettingsFragment extends BluetoothHelper {\n    // TODO: Rename 
parameter arguments, choose names that match\n // the fragment initialization parameters, e.g. ARG_ITEM_NUMBER\n\n /**\n * Called when the activity is first created.\n */\n// COMMENTED OUT (BluetoothHelper)\n// ListView listViewPaired;\n// ListView listViewDetected;\n// ArrayList<String> arrayListpaired;\n// Button buttonSearch, buttonOn, buttonOff, buttonWeight;\n// ArrayAdapter<String> adapter, detectedAdapter;\n// // static HandleSearch handleSeacrh;\n// BluetoothDevice bdDevice;\n// BluetoothClass bdClass;\n// ArrayList<BluetoothDevice> arrayListPairedBluetoothDevices;\n private ButtonClicked clicked;\n ListItemClickedOnPaired listItemClickedOnPaired;\n// BluetoothAdapter bluetoothAdapter = null;\n// ArrayList<BluetoothDevice> arrayListBluetoothDevices = null;\n ListItemClicked listItemClicked;\n /*\n https://github.com/aron-bordin/Android-with-Arduino-Bluetooth/blob/master/Android/Example/BluetoothArduino.java\n */\n// OutputStream mOut;\n// InputStream mIn;\n// private String TAG = \"BluetoothConnector\";\n// private BluetoothSocket mBlueSocket = null;\n// private boolean connected = false;\n// BluetoothDevice mDevice;\n// ConnectThread mConnectThread;\n// ConnectedThread mConnectedThread;\n//\n private OnFragmentInteractionListener mListener;\n//\n// // Message types used by the Handler\n// public static final int MESSAGE_WRITE = 1;\n// public static final int MESSAGE_READ = 2;\n// String readMessage = \"\";\n// SendReceiveBytes sendReceiveBT = null;\n\n /**\n * Use this factory method to create a new instance of\n * this fragment using the provided parameters.\n *\n * @param param1 Parameter 1.\n * @param param2 Parameter 2.\n * @return A new instance of fragment SettingsFragment.\n */\n // TODO: Rename and change types and number of parameters\n public static SettingsFragment newInstance(String param1, String param2) {\n SettingsFragment fragment = new SettingsFragment();\n Bundle args = new Bundle();\n fragment.setArguments(args);\n return fragment;\n }\n\n public SettingsFragment() {\n // Required empty public constructor\n }\n\n\n public void onCreate(LayoutInflater inflater, Bundle savedInstanceState, ViewGroup container) {\n super.onCreate(savedInstanceState);\n if (getArguments() != null) {\n }\n\n FragmentManager fm = getFragmentManager();\n fm.addOnBackStackChangedListener(new FragmentManager.OnBackStackChangedListener() {\n @Override\n public void onBackStackChanged() {\n FragmentManager fm = getActivity().getSupportFragmentManager();\n fm.popBackStack();\n }\n });\n\n }\n\n @Override\n public View onCreateView(LayoutInflater inflater, ViewGroup container,\n Bundle savedInstanceState) {\n View view = inflater.inflate(R.layout.fragment_settings, container, false);\n // take an instance of BluetoothAdapter - Bluetooth radio\n listViewDetected = (ListView) view.findViewById(R.id.listViewDetected);\n listViewPaired = (ListView) view.findViewById(R.id.listViewPaired);\n buttonSearch = (Button) view.findViewById(R.id.buttonSearch);\n buttonOn = (Button) view.findViewById(R.id.buttonOn);\n buttonOff = (Button) view.findViewById(R.id.buttonOff);\n buttonWeight = (Button) view.findViewById(R.id.buttonWeight);\n arrayListpaired = new ArrayList<String>();\n bluetoothAdapter = BluetoothAdapter.getDefaultAdapter();\n clicked = new ButtonClicked();\n// handleSeacrh = new HandleSearch();\n arrayListPairedBluetoothDevices = new ArrayList<BluetoothDevice>();\n /*\n * the above declaration is just for getting the paired bluetooth devices;\n * this helps in the removing the bond between 
paired devices.\n */\n listItemClickedOnPaired = new ListItemClickedOnPaired();\n arrayListBluetoothDevices = new ArrayList<BluetoothDevice>();\n adapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_list_item_1, arrayListpaired);\n detectedAdapter = new ArrayAdapter<String>(getActivity(), android.R.layout.simple_list_item_single_choice);\n listViewDetected.setAdapter(detectedAdapter);\n listItemClicked = new ListItemClicked();\n detectedAdapter.notifyDataSetChanged();\n listViewPaired.setAdapter(adapter);\n // Inflate the layout for this fragment\n return view;\n }\n\n // TODO: Rename method, update argument and hook method into UI event\n public void onButtonPressed(Uri uri) {\n if (mListener != null) {\n mListener.onFragmentInteraction(uri);\n }\n }\n\n @Override\n public void onAttach(Activity activity) {\n super.onAttach(activity);\n try {\n mListener = (OnFragmentInteractionListener) activity;\n } catch (ClassCastException e) {\n throw new ClassCastException(activity.toString()\n + \" must implement OnFragmentInteractionListener\");\n }\n }\n\n @Override\n public void onStart() {\n // TODO Auto-generated method stub\n super.onStart();\n getPairedDevices();\n buttonOn.setOnClickListener(clicked);\n buttonSearch.setOnClickListener(clicked);\n buttonOff.setOnClickListener(clicked);\n buttonWeight.setOnClickListener(clicked);\n listViewDetected.setOnItemClickListener(listItemClicked);\n listViewPaired.setOnItemClickListener(listItemClickedOnPaired);\n if (mDevice != null) {\n Log.i(\"mDevice onStart\", \"\" + mDevice.getName());\n }\n }\n\n private void getPairedDevices() {\n Set<BluetoothDevice> pairedDevice = bluetoothAdapter.getBondedDevices();\n if (pairedDevice.size() > 0) {\n for (BluetoothDevice device : pairedDevice) {\n arrayListpaired.add(device.getName() + \"\\n\" + device.getAddress());\n arrayListPairedBluetoothDevices.add(device);\n mDevice = device;\n }\n }\n adapter.notifyDataSetChanged();\n }\n\n\n// /*\n// Connecting with Arduino via Bluetooth\n// */\n// private class ConnectThread extends Thread {\n// private final BluetoothSocket mmSocket;\n// private final BluetoothDevice mmDevice;\n// final UUID MY_UUID = UUID.fromString(\"00001101-0000-1000-8000-00805f9b34fb\");\n//\n// public ConnectThread(BluetoothDevice device) {\n// Log.i(\"ConnectThread\", \"Started\");\n// BluetoothSocket tmp = null;\n// mmDevice = device;\n// try {\n// tmp = device.createRfcommSocketToServiceRecord(MY_UUID);\n// } catch (IOException e) {\n// }\n// mmSocket = tmp;\n// }\n//\n// public BluetoothSocket getSocket(){\n// return mmSocket;\n// }\n//\n// public void run() {\n// Log.i(\"ConnectThread\", \"Ran\");\n// bluetoothAdapter.cancelDiscovery();\n// try {\n// mmSocket.connect();\n// } catch (IOException connectException) {\n// try {\n// mmSocket.close();\n// } catch (IOException closeException) {\n// }\n// return;\n// }\n//// mConnectedThread = new ConnectedThread(mmSocket);\n//// mConnectedThread.start();\n// }\n//\n// public void cancel() {\n// try {\n// mmSocket.close();\n// } catch (IOException e) {\n// }\n// }\n// }\n\n// /*\n// http://stackoverflow.com/questions/23540754/send-data-from-arduino-to-android-app-via-bluetooth\n// */\n// // The Handler that gets information back from the Socket\n// private final Handler mHandler = new Handler() {\n// @Override\n// public void handleMessage(Message msg) {\n// switch (msg.what) {\n// case MESSAGE_WRITE:\n// //Do something when writing\n// break;\n// case MESSAGE_READ:\n// //Get the bytes from the msg.obj\n// byte[] readBuf = 
(byte[]) msg.obj;\n// // construct a string from the valid bytes in the buffer\n// readMessage = new String(readBuf, 0, msg.arg1);\n// Log.i(\"Weight: \", \"\"+readMessage);\n// break;\n// }\n// }\n// };\n\n /* DATA TRANSFER\n Writing and Reading to/from Arduino\n */\n// private class ConnectedThread extends Thread {\n// private final BluetoothSocket mmSocket;\n// private final InputStream mmInStream;\n// private final OutputStream mmOutStream;\n//\n// public ConnectedThread(BluetoothSocket socket) {\n// Log.i(\"ConnectedThread\", \"Started\");\n// mmSocket = socket;\n// InputStream tmpIn = null;\n// OutputStream tmpOut = null;\n// try {\n// tmpIn = socket.getInputStream();\n// tmpOut = socket.getOutputStream();\n// } catch (IOException e) {\n// }\n// mmInStream = tmpIn;\n// mmOutStream = tmpOut;\n// }\n//\n// public void run() {\n// sendReceiveBT = new SendReceiveBytes(mmSocket);\n// new Thread(sendReceiveBT).start();\n// String red = \"r\";\n// byte[] myByte = stringToBytesUTFCustom(red);\n// sendReceiveBT.write(myByte);\n// }\n//\n// public void cancel() {\n// try {\n// mmSocket.close();\n// } catch (IOException e) {\n// }\n// }\n// }\n\n\n// public static byte[] stringToBytesUTFCustom(String str) {\n// char[] buffer = str.toCharArray();\n// byte[] b = new byte[buffer.length << 1];\n// for (int i = 0; i < buffer.length; i++) {\n// int bpos = i << 1;\n// b[bpos] = (byte) ((buffer[i] & 0xFF00) >> 8);\n// b[bpos + 1] = (byte) (buffer[i] & 0x00FF);\n// }\n// return b;\n// }\n\n // private BluetoothSocket btSocket;\n// private InputStream btInputStream = null;\n// ;\n// private OutputStream btOutputStream = null;\n// String TAG = \"SendReceiveBytes\";\n//\n// public SendReceiveBytes(BluetoothSocket socket) {\n// btSocket = socket;\n// try {\n// btInputStream = btSocket.getInputStream();\n// btOutputStream = btSocket.getOutputStream();\n// } catch (IOException streamError) {\n// Log.e(TAG, \"Error when getting input or output Stream\");\n// }\n// }\n//\n// public void run() {\n// private class SendReceiveBytes implements Runnable {\n// byte[] buffer = new byte[1024]; // buffer store for the stream\n// int bytes; // bytes returned from read()\n//\n// // Keep listening to the InputStream until an exception occurs\n// while (true) {\n// try {\n// // Read from the InputStream\n// bytes = btInputStream.read(buffer);\n// // Send the obtained bytes to the UI activity\n// mHandler.obtainMessage(MESSAGE_READ, bytes, -1, buffer)\n// .sendToTarget();\n// } catch (IOException e) {\n// Log.e(TAG, \"Error reading from btInputStream\");\n// break;\n// }\n// }\n// }\n//\n// public void write(byte[] bytes) {\n// try {\n// btOutputStream.write(bytes);\n// }\n// catch (IOException e) {\n// Log.e(TAG, \"Error when writing to btOutputStream\");\n// }\n// }\n// }\n\n\n class ListItemClicked implements AdapterView.OnItemClickListener {\n @Override\n public void onItemClick(AdapterView<?> parent, View view, int position, long id) {\n // TODO Auto-generated method stub\n bdDevice = arrayListBluetoothDevices.get(position);\n //bdClass = arrayListBluetoothDevices.get(position);\n //Log.i(\"Log\", \"The dvice : \" + bdDevice.toString());\n /*\n * here below we can do pairing without calling the callthread(), we can directly call the\n * connect(). 
but for the safer side we must usethe threading object.\n */\n //callThread();\n //connect(bdDevice);\n\n Log.i(\"Device Name: \", \"\" + bdDevice.getName());\n mConnectThread = new ConnectThread(bdDevice);\n mConnectThread.start();\n\n\n// Boolean isBonded = false;\n// try {\n// isBonded = createBond(bdDevice);\n// if(isBonded)\n// {\n// //arrayListpaired.add(bdDevice.getName()+\"mout\\n\"+bdDevice.getAddress());\n// //adapter.notifyDataSetChanged();\n// getPairedDevices();\n// adapter.notifyDataSetChanged();\n// }\n// } catch (Exception e) {\n// e.printStackTrace();\n// }//connect(bdDevice);\n//\n// Log.i(\"Log\", \"The bond is created: \"+isBonded);\n connected = true;\n }\n }\n\n class ListItemClickedOnPaired implements AdapterView.OnItemClickListener {\n @Override\n public void onItemClick(AdapterView<?> parent, View view, int position, long id) {\n bdDevice = arrayListPairedBluetoothDevices.get(position);\n try {\n Boolean removeBonding = removeBond(bdDevice);\n if (removeBonding) {\n arrayListpaired.remove(position);\n adapter.notifyDataSetChanged();\n }\n\n\n Log.i(\"Log\", \"Removed\" + removeBonding);\n } catch (Exception e) {\n // TODO Auto-generated catch block\n e.printStackTrace();\n }\n }\n }\n\n\n/* private Boolean connect(BluetoothDevice bdDevice) {\n Boolean bool = false;\n try {\n Log.i(\"Log\", \"service method is called \");\n Class cl = Class.forName(\"android.bluetooth.BluetoothDevice\");\n Class[] par = {};\n Method method = cl.getMethod(\"createBond\", par);\n Object[] args = {};\n bool = (Boolean) method.invoke(bdDevice);//, args);// this invoke creates the detected devices paired.\n //Log.i(\"Log\", \"This is: \"+bool.booleanValue());\n// Log.i(\"Log\", \"devicesss: \"+bdDevice.getName());\n\n } catch (Exception e) {\n Log.i(\"Log\", \"Inside catch of serviceFromDevice Method\");\n e.printStackTrace();\n }\n return bool.booleanValue();\n }*/\n\n\n\n\n public boolean removeBond(BluetoothDevice btDevice)\n throws Exception {\n Class btClass = Class.forName(\"android.bluetooth.BluetoothDevice\");\n Method removeBondMethod = btClass.getMethod(\"removeBond\");\n Boolean returnValue = (Boolean) removeBondMethod.invoke(btDevice);\n return returnValue.booleanValue();\n }\n\n\n/* public boolean createBond(BluetoothDevice btDevice)\n throws Exception {\n Class class1 = Class.forName(\"android.bluetooth.BluetoothDevice\");\n Method createBondMethod = class1.getMethod(\"createBond\");\n Boolean returnValue = (Boolean) createBondMethod.invoke(btDevice);\n return returnValue.booleanValue();\n }*/\n\n class ButtonClicked implements View.OnClickListener {\n @Override\n public void onClick(View view) {\n switch (view.getId()) {\n case R.id.buttonOn:\n onBluetooth();\n break;\n case R.id.buttonSearch:\n arrayListBluetoothDevices.clear();\n startSearching();\n break;\n case R.id.buttonOff:\n offBluetooth();\n break;\n case R.id.buttonWeight:\n getWeight();\n break;\n default:\n break;\n }\n }\n }\n\n private BroadcastReceiver myReceiver = new BroadcastReceiver() {\n @Override\n public void onReceive(Context context, Intent intent) {\n Message msg = Message.obtain();\n String action = intent.getAction();\n if (BluetoothDevice.ACTION_FOUND.equals(action)) {\n Toast.makeText(context, \"ACTION_FOUND\", Toast.LENGTH_SHORT).show();\n\n BluetoothDevice device = intent.getParcelableExtra(BluetoothDevice.EXTRA_DEVICE);\n try {\n //device.getClass().getMethod(\"setPairingConfirmation\", boolean.class).invoke(device, true);\n //device.getClass().getMethod(\"cancelPairingUserInput\", 
boolean.class).invoke(device);\n } catch (Exception e) {\n Log.i(\"Log\", \"Inside the exception: \");\n e.printStackTrace();\n }\n\n if (arrayListBluetoothDevices.size() < 1) // this checks if the size of bluetooth device is 0,then add the\n { // device to the arraylist.\n detectedAdapter.add(device.getName() + \"\\n\" + device.getAddress());\n arrayListBluetoothDevices.add(device);\n detectedAdapter.notifyDataSetChanged();\n } else {\n boolean flag = true; // flag to indicate that particular device is already in the arlist or not\n for (int i = 0; i < arrayListBluetoothDevices.size(); i++) {\n if (device.getAddress().equals(arrayListBluetoothDevices.get(i).getAddress())) {\n flag = false;\n }\n }\n if (flag == true) {\n detectedAdapter.add(device.getName() + \"\\n\" + device.getAddress());\n arrayListBluetoothDevices.add(device);\n detectedAdapter.notifyDataSetChanged();\n }\n }\n }\n }\n };\n\n private void startSearching() {\n Toast.makeText(getActivity(), \"Searching...\", Toast.LENGTH_SHORT).show();\n IntentFilter intentFilter = new IntentFilter(BluetoothDevice.ACTION_FOUND);\n getActivity().registerReceiver(myReceiver, intentFilter);\n bluetoothAdapter.startDiscovery();\n }\n\n private void onBluetooth() {\n if (!bluetoothAdapter.isEnabled()) {\n bluetoothAdapter.enable();\n Log.i(\"Log\", \"Bluetooth is Enabled\");\n }\n }\n\n private void offBluetooth() {\n if (bluetoothAdapter.isEnabled()) {\n bluetoothAdapter.disable();\n }\n }\n\n public void getWeight() {\n// if (mConnectedThread == null){\n// Log.e(\"Error Connected \", \"null\");\n// return;\n// }\n mConnectedThread = new ConnectedThread(mConnectThread.getSocket());\n mConnectedThread.start();\n }\n\n\n @Override\n public void onDetach() {\n super.onDetach();\n mListener = null;\n }\n\n /**\n * This interface must be implemented by activities that contain this\n * fragment to allow an interaction in this fragment to be communicated\n * to the activity and potentially other fragments contained in that\n * activity.\n * <p/>\n * See the Android Training lesson <a href=\n * \"http://developer.android.com/training/basics/fragments/communicating.html\"\n * >Communicating with Other Fragments</a> for more information.\n */\n public interface OnFragmentInteractionListener {\n // TODO: Update argument type and name\n public void onFragmentInteraction(Uri uri);\n }\n\n\n\n @Override\n public void onPause()\n {\n try {\n getActivity().unregisterReceiver(myReceiver);\n }catch (IllegalArgumentException e){\n e.printStackTrace();\n }\n\n super.onStop();\n }\n\n\n\n\n\n}\n" } ]
6
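`WirelessWeight.ino` in the CheckOut record above converts a load-cell reading to a weight with a two-point linear map (`mapfloat`) between the calibration pairs (analogValueA, loadA) and (analogValueB, loadB), after subtracting the at-rest baseline captured in `setup()`. Below is a minimal Python sketch of the same arithmetic, assuming hypothetical calibration values — the pairs here are placeholders, not measured constants from the hardware:

```python
def map_float(x, in_min, in_max, out_min, out_max):
    # Straight-line interpolation through the two calibration points.
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

def analog_to_load(analog_val, baseline, cal_a=(6.5, 2.0), cal_b=(5.0, 4.0)):
    """Convert an ADC reading to a load, mirroring analogToLoad().

    baseline is the at-rest reading captured at startup; cal_a and cal_b
    are (analog, load) calibration pairs (illustrative defaults only).
    """
    (a_in, a_out), (b_in, b_out) = cal_a, cal_b
    return map_float(analog_val - baseline, a_in, b_in, a_out, b_out)

# Halfway between the calibration inputs maps to halfway between the loads.
assert analog_to_load(5.75, baseline=0.0) == 3.0
```

Because the map is a straight line, every reading shares the same slope (loadB - loadA)/(analogValueB - analogValueA), which is why a two-point calibration suffices.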
IlsalaciousCrum/hb_dicts_restaurant-ratings
https://github.com/IlsalaciousCrum/hb_dicts_restaurant-ratings
4c47d62c28c99411d745e009cd53e362d3f23aab
7a593911eeb961f35ad8dafcfadb98e484cc27b7
7cbce0c658f39f46f29d4d6feb48337114811fc3
refs/heads/master
2016-09-22T17:33:27.912018
2016-07-12T01:05:53
2016-07-12T01:05:53
63108514
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6456876397132874, "avg_line_length": 20.450000762939453, "blob_id": "bc1c853f36dab30092b58b26ef4b5546be380601", "content_id": "d0ae06e090b45cc915fb01b96aab6ee378d472b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 429, "license_type": "no_license", "max_line_length": 62, "num_lines": 20, "path": "/restaurant-ratings.py", "repo_name": "IlsalaciousCrum/hb_dicts_restaurant-ratings", "src_encoding": "UTF-8", "text": "score = {} \ninput_file = open(\"scores.txt\")\nfor line in input_file:\n line = line.rstrip()\n line = line.split(\":\")\n score[line[0]]=line[1] \n\nrestaurant_list = score.items()\nrestaurant_list.sort()\n\nfor item in restaurant_list:\n print \"%s is rated at %s\" % (item[0], item[1]) \n\n\n\n\n# create a list of keys\n# sort the keys\n# for each key in the sorted keys:\n# print the restaurant name and score# your code goes here\n" } ]
1
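`restaurant-ratings.py` above reads `name:score` lines into a dict and prints them in alphabetical order, using the Python 2 `items()`-then-`sort()` idiom. A compact Python 3 sketch of the same flow, assuming the same `scores.txt` format (the filename comes from the original script):

```python
# Build a name -> score mapping from "name:score" lines, skipping blanks.
with open("scores.txt") as f:
    scores = dict(line.rstrip("\n").split(":", 1) for line in f if line.strip())

# sorted() on dict items orders by key (restaurant name), matching the
# list-then-sort approach in the original Python 2 script.
for name, score in sorted(scores.items()):
    print(f"{name} is rated at {score}")
```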
apatyk/Daily-Pattern-Classifier
https://github.com/apatyk/Daily-Pattern-Classifier
b6a8eb1dfcd44c760393b7f7875b8901862db3f7
705a1120ceb63cd1709235abec2ec62605f44b41
6f7041996a6f2d69b689c3b8561f95e5aadbacea
refs/heads/main
2023-06-23T06:58:29.537488
2021-07-23T16:40:27
2021-07-23T16:40:27
388189903
0
0
null
null
null
null
null
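The `common/testing.py` module in the files below segments eating episodes with hysteresis thresholding: a segment opens when the classifier probability crosses a high threshold and closes only after it falls below a low threshold, with the full routine additionally waiting out a pause timer of half a window and enforcing a minimum episode length. Below is a stripped-down sketch of just the two-threshold state machine, with illustrative threshold values — it deliberately omits the pause and minimum-length checks of `hysteresis_threshold()`:

```python
import numpy as np

def hysteresis_segments(probs, t_start=0.8, t_end=0.4):
    """Return (start, end) index pairs where probs rises above t_start
    and stays until it drops below t_end. Illustrative only: the real
    hysteresis_threshold() also applies a pause timer and a minimum
    episode length before accepting a segment.
    """
    segments, start, in_seg = [], 0, False
    for i, p in enumerate(probs):
        if not in_seg and p > t_start:
            in_seg, start = True, i
        elif in_seg and p < t_end:
            segments.append((start, i))
            in_seg = False
    if in_seg:  # segment still open at the end of the series
        segments.append((start, len(probs)))
    return segments

probs = np.array([0.1, 0.9, 0.85, 0.6, 0.5, 0.3, 0.2, 0.9, 0.95, 0.1])
assert hysteresis_segments(probs) == [(1, 5), (7, 9)]
```

Episode-level TP/FP/FN can then be scored by interval overlap against ground-truth segments, which is what `calc_episode_metrics()` in the same module does.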
[ { "alpha_fraction": 0.5086464285850525, "alphanum_fraction": 0.5236758589744568, "avg_line_length": 32.36857986450195, "blob_id": "03b52802e449c62d7386e46488f6bad56986286a", "content_id": "1b417f708596576c4840518104bc9e6d7f619111", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11045, "license_type": "no_license", "max_line_length": 139, "num_lines": 331, "path": "/common/testing.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\ndef get_raw_data(samples, labels, data):\n axdata = []\n aydata = []\n\n for i in range(len(labels)):\n f = samples[i][0]\n t1 = samples[i][1]\n t2 = samples[i][2]\n sample = data[f][t1:t2]\n label = labels[i]\n axdata.append(sample)\n aydata.append(label)\n\n rawsamples = np.array(axdata, copy=True)\n rawlabels = np.array(aydata, copy=True)\n del axdata\n del aydata\n \n return rawsamples, rawlabels\n\ndef chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n \ndef get_raw_samples(samples, data):\n axdata = []\n\n for i in range(len(samples)):\n f = samples[i][0]\n t1 = samples[i][1]\n t2 = samples[i][2]\n sample = data[f][t1:t2]\n axdata.append(sample)\n\n rawsamples = np.array(axdata, copy=True)\n del axdata\n \n return rawsamples\n\ndef weighted_accuracy(TP, FP, TN, FN):\n P = TP + FN\n N = TN + FP\n if P > 0:\n W = N/P\n else: \n W = 1\n WAcc = (W * TP + TN) / (W * P + N)\n return WAcc\n\ndef true_positive_rate(TP, FN):\n if TP + FN == 0:\n tpr = 0\n else:\n tpr = TP / (TP + FN)\n return tpr\n\ndef true_negative_rate(TN, FP):\n if TN + FP == 0:\n tnr = 0\n else:\n tnr = TN / (TN + FP)\n return tnr\n\ndef f1_score(TP, FP, FN):\n if TP == 0 and FP == 0 and FN == 0:\n f1 = 0\n else:\n f1 = TP / (TP + 0.5 * (FP + FN))\n return f1\n\ndef precision(TP, FP):\n if TP + FP == 0:\n p = 0\n else:\n p = TP / (TP + FP)\n return p\n\ndef consecutive_groups(data, stepsize=1):\n segments = np.split(data, np.where(np.diff(data) != stepsize)[0]+1)\n bookends = []\n for segment in segments:\n bookends.append((segment[0], segment[-1]))\n\n return bookends\n\ndef hysteresis_threshold(model, samples, labels, start_threshold, end_threshold, winmin, stepsec, episode_min=1.):\n \"\"\"\n model: tensorflow model\n samples: samples of raw data\n labels: labels for raw data\n start_threshold: high threshold of the beginning of segmentation\n end_threshold: low threshold of the end of segmentation\n winmin: size of a window sample in unit of minute\n stepsec: stride to move the window in unit of second / the number of second between two adjacent window samples\n episode_min: the minimum length of eating episode in unit of minute. 
If end of segmentation -start of segmentation < episode_min,\n then the episode will not be counted\n \"\"\"\n import pandas as pd\n result_ls = []\n t_pause = winmin / 2 * 60\n\n result = {'segment_start':[], 'segment_end':[], 'prob':[], 'predictions':np.zeros([len(labels)], dtype=int), 'segment_count':0}\n \n probs = tf.squeeze(model.predict(samples, batch_size=4096))\n samples = tf.squeeze(samples)\n state, start, end = 0, 0, 0\n pause_counter = 0\n for i in range(len(labels)):\n prob = probs[i].numpy()\n result['prob'].append(prob)\n\n if state == 0 and prob > start_threshold:\n state = 1\n start = i\n elif state == 1 and prob < end_threshold:\n state = 2\n end = i+1 # for Python list slicing\n pause_counter = 0\n elif state == 2:\n if prob > start_threshold:\n state = 1\n else:\n pause_counter += stepsec\n if pause_counter >= t_pause:\n # convert time to second and check threshold\n if (end-start)*stepsec >= episode_min*60:\n # save data\n result['segment_start'].append(start)\n result['segment_end'].append(end)\n result['segment_count'] += 1\n result['predictions'][start:end] = 1\n pass\n end = 0\n state = 0\n if state == 1: # catch meal if it ends at the end of probabilities\n end = i\n if pause_counter >= t_pause and (end-start)*stepsec >= episode_min*60:\n # save data\n result['segment_start'].append(start)\n result['segment_end'].append(end)\n result['segment_count'] += 1\n result['predictions'][start:end] = 1\n \n result_ls.append(result)\n \n return pd.DataFrame(result_ls)\n\ndef hysteresis_threshold_probs(probs, labels, start_threshold, end_threshold, winmin, stepsec, episode_min=1.):\n \"\"\"\n probs: model output probabilities from samples\n labels: labels for raw data\n start_threshold: high threshold of the beginning of segmentation\n end_threshold: low threshold of the end of segmentation\n winmin: size of a window sample in unit of minute\n stepsec: stride to move the window in unit of second / the number of second between two adjacent window samples\n episode_min: the minimum length of eating episode in unit of minute. 
If end of segmentation -start of segmentation < episode_min,\n then the episode will not be counted\n \"\"\"\n import pandas as pd\n result_ls = []\n t_pause = winmin / 2 * 60\n\n result = {'segment_start':[], 'segment_end':[], 'prob':[], 'predictions':np.zeros([len(labels)], dtype=int), 'segment_count':0}\n \n state, start, end = 0, 0, 0\n pause_counter = 0\n for i in range(len(labels)):\n prob = probs[i]\n result['prob'].append(prob)\n\n if state == 0 and prob > start_threshold:\n state = 1\n start = i\n elif state == 1 and prob < end_threshold:\n state = 2\n end = i+1 # for Python list slicing\n pause_counter = 0\n elif state == 2:\n if prob > start_threshold:\n state = 1\n else:\n pause_counter += stepsec\n if pause_counter >= t_pause:\n # convert time to second and check threshold\n if (end-start)*stepsec >= episode_min*60:\n # save data\n result['segment_start'].append(start)\n result['segment_end'].append(end)\n result['segment_count'] += 1\n result['predictions'][start:end] = 1\n pass\n end = 0\n state = 0\n if state == 1: # catch meal if it ends at the end of probabilities\n end = i\n if pause_counter >= t_pause and (end-start)*stepsec >= episode_min*60:\n # save data\n result['segment_start'].append(start)\n result['segment_end'].append(end)\n result['segment_count'] += 1\n result['predictions'][start:end] = 1\n \n result_ls.append(result)\n \n return pd.DataFrame(result_ls)\n\ndef single_threshold(probs, labels, winmin, stepsec, threshold=0.5, episode_min=1.):\n \"\"\"\n probs: model output probabilities from samples\n labels: labels for raw data\n \"\"\"\n import pandas as pd\n result_ls = []\n t_pause = winmin / 2 * 60\n\n result = {'segment_start':[], 'segment_end':[], 'prob':[], 'predictions':np.zeros([len(labels)], dtype=int), 'segment_count':0}\n \n state, start, end = 0, 0, 0\n pause_counter = 0\n for i in range(len(labels)):\n prob = probs[i]\n result['prob'].append(prob)\n\n if state == 0 and prob > threshold:\n state = 1\n start = i\n elif state == 1 and prob < threshold:\n state = 2\n end = i+1 # for Python list slicing\n pause_counter = 0\n elif state == 2:\n if prob > threshold:\n state = 1\n else:\n pause_counter += stepsec\n if pause_counter >= t_pause:\n # convert time to second and check threshold\n if (end-start)*stepsec >= episode_min*60:\n # save data\n result['segment_start'].append(start)\n result['segment_end'].append(end)\n result['segment_count'] += 1\n result['predictions'][start:end] = 1\n pass\n end = 0\n state = 0\n if state == 1: # catch meal if it ends at the end of probabilities\n end = i\n if pause_counter >= t_pause and (end-start)*stepsec >= episode_min*60:\n # save data\n result['segment_start'].append(start)\n result['segment_end'].append(end)\n result['segment_count'] += 1\n result['predictions'][start:end] = 1\n \n result_ls.append(result)\n \n return pd.DataFrame(result_ls)\n\ndef calc_episode_metrics(results, labels):\n \"\"\"\n results: pandas dataframe output by hysteresis_threshold()\n labels: GT labels for raw data\n \"\"\"\n TP, FP, FN = 0, 0, 0\n\n gt_indices = np.where(labels == 1)\n if np.size(gt_indices) != 0:\n gt_segments = consecutive_groups(gt_indices[0])\n else:\n gt_segments = []\n eating_segments = list(zip(results['segment_start'][0], [x-1 for x in results['segment_end'][0]])) # to account for Python list slicing\n \n GTEval = [-1] * len(gt_segments)\n MDEval = [-1] * len(eating_segments)\n\n # TP - GT event, model event (any overlap) - 1\n # FN - GT event, missed by model - 2\n # FP - no event, model event - 3\n\n # look 
for matches with GT events\n for i, (gt_start, gt_end) in enumerate(gt_segments):\n for e, (md_start, md_end) in enumerate(eating_segments):\n # (1) MD within GT\n # (2) MD starts before GT and ends in GT\n # (3) MD starts in GT and ends after GT\n # (4) MD contains GT\n if (md_start >= gt_start and md_end <= gt_end) or \\\n (md_start <= gt_start and md_end > gt_start and md_end <= gt_end) or \\\n (md_start >= gt_start and md_start < gt_end and md_end >= gt_end) or \\\n (md_start <= gt_start and md_end >= gt_end): \n GTEval[i] = e\n MDEval[e] = i\n \n # count up classifications\n for i in range(len(gt_segments)):\n if GTEval[i] == -1:\n FN += 1\n else:\n TP += 1\n for e in range(len(eating_segments)):\n if MDEval[e] == -1:\n FP += 1\n\n return TP, FP, FN\n\ndef calc_time_metrics(MD, GT):\n \"\"\"\n MD: array of model detection 1s and 0s to signify eating and non-eating\n GT: array of GT 1s and 0s to signify eating and non-eating\n \"\"\"\n TP, FP, TN, FN = 0, 0, 0, 0\n \n # Count TP, FP, TN, FN\n for i in range(len(GT)):\n if MD[i] == 1:\n if GT[i] == 1: \n TP += 1\n else:\n FP += 1\n else:\n if GT[i] == 0:\n TN += 1\n else:\n FN += 1\n \n return TP, FP, TN, FN\n" }, { "alpha_fraction": 0.5760542154312134, "alphanum_fraction": 0.6807228922843933, "avg_line_length": 34.89189147949219, "blob_id": "eded8c6033df79d6f161637fae48124ac96a8e44", "content_id": "d62b3e0d1c0490b9bc3acffeda1f84b29aa7e8b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1328, "license_type": "no_license", "max_line_length": 135, "num_lines": 37, "path": "/GenerateSamples/SubmitGenSamplesJob.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "import os\n\n#PBS -l select=1:ncpus=28:mem=120gb:ngpus=2:gpu_model=p100:interconnect=fdr,walltime=4:00:00\n#PBS -l select=1:ncpus=24:mem=120gb:ngpus=2:gpu_model=k40:interconnect=fdr,walltime=4:00:00\n#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=72:00:00\n#PBS -l select=1:ncpus=56:mem=370gb:ngpus=2:gpu_model=v100s:interconnect=hdr,walltime=72:00:00\n\nwindow_lengths = [2, 4, 6, 8, 10]\n#window_lengths = [6]\n\nfor W in window_lengths:\n\n pbsfile = open(\"job.pbs\", \"w\")\n\n pbsfile.write(\"\"\"#PBS -u apatyk\n #PBS -N GenSamples_{:d}Min\n #PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=72:00:00\n #PBS -m ae\n #PBS -o /home/apatyk/Research/GenerateSamples/results/gen-samples-{:d}min.txt\n #PBS -j oe\n\n \"\"\".format(W, W))\n\n pbsfile.write(\"\"\"ulimit -c 0\n source /software/spackages/linux-centos8-x86_64/gcc-8.3.1/anaconda3-2019.10-v5cuhr6keyz5ryxcwvv2jkzfj2gwrj4a/etc/profile.d/conda.sh\n module load cuda/10.2.89-gcc/8.3.1 cudnn/8.0.0.180-10.2-linux-x64-gcc/8.3.1 anaconda3/2019.10-gcc/8.3.1\n conda activate tf_env\n cd ~/Research/GenerateSamples/ \n python GenerateSamples.py {:d}\n \"\"\".format(W))\n pbsfile.write(\"\"\"if [ $? 
-ne 0 ]; then\n rm core.*\n fi\n exit\"\"\")\n\n pbsfile.close()\n os.system(\"qsub job.pbs\")\n" }, { "alpha_fraction": 0.5242620706558228, "alphanum_fraction": 0.550987184047699, "avg_line_length": 40.25120162963867, "blob_id": "d5a12ac19a6826226f647f52ba8df98df82afa29", "content_id": "9cdbf19442eeccfeea63d701549003ddcaa30e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25781, "license_type": "no_license", "max_line_length": 152, "num_lines": 625, "path": "/common/loadfile.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport sys\nfrom tqdm import tqdm\nimport math\n\nACC_THRESH = 0.008 # sum(acc) (G) max value for stddev rest\nGYRO_THRESH = 0.04 * 180.0/ math.pi\n\ndef loadshmfile(File_Name):\n RawData = np.fromfile(File_Name, dtype=np.dtype(\"6f4\")) \n \n # Swap gyroscope axis. Remember python always uses variables with reference.\n # Swap Acceleromter\n Temp = np.copy(RawData[:,5])\n Temp2 = np.copy(RawData[:,3])\n Temp3 = np.copy(RawData[:,4])\n RawData[:,3] = Temp\n RawData[:,4] = Temp2\n RawData[:,5] = Temp3\n \n return RawData\n\ndef smooth(RawData):\n # Create kernel\n SIG = 10.0\n WINDOW_SIZE = 15 # size of window\n r_array = np.linspace(14,0, 15)\n Kernel = np.exp((0-np.square(r_array))/(2 * SIG * SIG))\n deno = sum(Kernel)\n Kernel = Kernel / deno\n \n Smoothed = np.copy(RawData) # Clone (deep copy) the variable, instead of reference. We don't want to change RawData\n\n r,c = RawData.shape\n \n for x in range(c):\n Smoothed[WINDOW_SIZE-1:,x] = np.convolve(RawData[:,x], Kernel, mode='valid') # Convolution followed by discarding of extra values after boundary\n\n # Copy first 15 values from Rawdata to Smoothed. 
np.convolve doesn't do this.\n Smoothed[:15, :] = RawData[:15,:]\n return Smoothed\n\ndef loadEvents(filename):\n \"\"\"\n loads events data given the .shm filename\n \"\"\"\n # Load the meals file to get any triaged meals.\n SkippedMeals = []\n mealsfile = open(\"../common/meals-shimmer.txt\", \"r\") \n for line in mealsfile:\n #print(line)\n data = line.split()\n #print(data[0], data[1], data[13])\n if(int(data[13]) == 0):\n Mdata = [data[0][-9:], data[1], int(data[13])]\n SkippedMeals.append(Mdata)\n \n EventsFileName = filename[:len(filename)-4]+\"-events.txt\"\n \n # Load the meals\n EventNames = []\n EventStart = (np.zeros((100))).astype(int)\n EventEnd = (np.zeros((100))).astype(int)\n TotalEvents = 0\n TimeOffset = 0\n file = open(EventsFileName, \"r\") \n #print(filename)\n for lines in file:\n #print(lines)\n words = lines.split()\n if(len(words) == 0): continue # Skip empty lines\n # Convert Start time to offset\n if(words[0] == \"START\"): # Get Start Time (TimeOffset) from file\n #print(words)\n hours = int(words[2].split(\":\")[0])\n minutes = int(words[2].split(\":\")[1])\n seconds = int(words[2].split(\":\")[2])\n #print(\"{}h:{}m:{}s\".format(hours, minutes,seconds))\n TimeOffset = (hours * 60 * 60) + (minutes * 60) + seconds\n continue\n if(words[0] == \"END\"):\n #print(words)\n continue\n for x in range(1,3): # Process Events Data\n hours = int(words[x].split(\":\")[0])\n minutes = int(words[x].split(\":\")[1])\n seconds = int(words[x].split(\":\")[2])\n EventTime = (hours * 60 * 60) + (minutes * 60) + seconds\n EventTime = EventTime - TimeOffset\n if(x == 1): EventStart[TotalEvents] = EventTime * 15\n if(x == 2): EventEnd[TotalEvents] = EventTime * 15\n if(TotalEvents>0):\n if(EventStart[TotalEvents]<EventStart[TotalEvents-1]):\n EventStart[TotalEvents] = EventStart[TotalEvents] + (24*60*60*15)\n if(EventEnd[TotalEvents]<EventEnd[TotalEvents-1]):\n EventEnd[TotalEvents] = EventEnd[TotalEvents] + (24*60*60*15)\n #print(TotalEvents)\n \n # Check if meal was triaged out for too much walking or rest\n ename = words[0]\n fname = filename[-9:]\n skipmeal = 0\n #print(fname, ename)\n for skippedmeal in SkippedMeals:\n Pname, EventName, Keep = skippedmeal\n if(Pname == fname and ename == EventName):\n #print(Pname, EventName, Keep, ename, fname, Pname == fname, ename == EventName)\n skipmeal = 1\n break\n \n if(skipmeal == 1): continue\n TotalEvents = TotalEvents + 1\n EventNames.append(ename)\n return TotalEvents, EventStart, EventEnd, EventNames\n\n# Does not normalize. 
Normalization is done post-hoc.\ndef loadAllData3(winlength, step, removerest=1, removewalk=0, removebias=1, shx=1, gtperc = 0.5):\n ### Load data, make samples \n\n samples = []\n labels = []\n AllSmoothed = []\n AllIndices = []\n totaleatingrest = 0\n totaleatingwalk = 0\n df = pd.read_csv('../common/batch-unix.txt', names=[\"Filenames\"])\n for x in tqdm(range(len(df[\"Filenames\"]))):\n fileeatingrest = 0\n fileeatingwalk = 0\n filesamples = []\n filelabels = []\n File_Name = \"/home/apatyk/\" + df[\"Filenames\"][x]\n RawData = loadshmfile(File_Name)\n Smoothed = smooth(RawData)\n Normalized = np.empty_like(Smoothed)\n\n if(removebias):\n # Remove acceleration bias\n TREND_WINDOW = 150\n mean = []\n for j in range(3):\n dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()\n dat[:TREND_WINDOW-1] = 0\n mean.append(dat)\n\n mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values\n # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable\n # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.\n\n Smoothed[:,0:3]-=mean2\n del mean2, mean, dat\n \n AllSmoothed.append(np.copy(Smoothed))\n \n if(removerest != 0):\n std = []\n for j in range(6):\n dat = pd.Series(Smoothed[:,j]).rolling(window=15).std(ddof=0)\n dat[:14] = 0\n std.append(dat)\n # Above doesn't center window. Left Shift all values to the left by 7 datum (6 sensors)\n std2 = np.roll(np.asarray(std).transpose(), -7*6) \n accstd = np.sum(std2[:,:3], axis=1)\n gyrostd = np.sum(std2[:,-3:], axis=1)\n datrest = (accstd < ACC_THRESH) & (gyrostd < GYRO_THRESH)\n mrest = datrest.copy()\n\n for i in range(8,len(datrest)-7):\n if(datrest[i]==True):\n mrest[i-7:i+8] = True\n \n del dat, datrest, gyrostd, accstd, std2, std\n \n if(removewalk!=0):\n minv = np.zeros((3,1))\n maxv = np.zeros((3,1))\n zerocross = np.zeros((len(Smoothed),1)).astype(int)\n for j in range(3):\n minv[j]=999.9\n maxv[j]=-999.9\n\n for t in range(len(Smoothed)-1):\n for j in range(3):\n if (Smoothed[t][j+3] < minv[j]):\n minv[j]=Smoothed[t][j+3]\n if (Smoothed[t][j+3] > maxv[j]):\n maxv[j]=Smoothed[t][j+3]\n if ((Smoothed[t][j+3] < 0.0) and (Smoothed[t+1][j+3] > 0.0) and (minv[j] < -5.0)):\n zerocross[t]+=(1<<j)\n minv[j]=999.9\n maxv[j]=-999.9\n if ((Smoothed[t][j+3] > 0.0) and (Smoothed[t+1][j+3] < 0.0) and (maxv[j] > 5.0)):\n zerocross[t]+=(1<<(j+3))\n minv[j]=999.9\n maxv[j]=-999.9\n\n zc = [0 if i==0 else 1 for i in zerocross]\n del minv, maxv, zerocross\n \n del RawData\n\n # Identify things as GT\n [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name) #loadfile.loadEvents(File_Name)\n GT = np.zeros((len(Smoothed))).astype(int)\n for i in range(TotalEvents):\n #print(EventStart[i], EventStart[i], type(EventStart[i]))\n GT[EventStart[i]: EventEnd[i]+1] = 1\n\n # Generate labels \n MaxData = len(Smoothed)\n for t in range(0, MaxData, step):\n sample = [x, t, t+winlength]\n label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc)\n #isrest = if(removerest) else 0\n #iswalk = if(removewalk) else 0\n #if(label and isrest):\n if(label and removerest!=0): # Only ignore if in eating\n isrest = int((np.sum(mrest[t:t+winlength])/winlength)>=0.65)\n if(isrest and removerest==1): continue; # Do not consider this sample at all. 
Comment this if you want to move the sample to non-eating.\n elif(isrest and removerest==2): label = 0;\n else: label = 1 \n if(label and removewalk!=0): # Only ignore if in eating\n iswalk = int((np.sum(zc[t:t+winlength])/winlength)>=0.15)\n if(iswalk and removewalk==1): continue;\n elif(iswalk and removewalk==2): label=0;\n else: label = 1\n# fileeatingwalk+=1\n# continue # Do not append this sample to the dataset\n \n if(t+winlength < MaxData): # Ignore last small window. Not ignoring results in a list rather than a numpy array.\n filesamples.append(sample)\n filelabels.append(label)\n\n samples = samples + filesamples\n labels = labels + filelabels\n numsamples = (len(filesamples))\n totaleatingwalk+=fileeatingwalk\n #print(\"Loaded file {}, {} samples from {}\".format(x, numsamples,File_Name), flush=True)\n #print(\"Loaded file {}, {} samples from {}, contains {} rest in eating\".format(x,numsamples,File_Name,fileeatingrest),\n # flush=True)\n\n samples_array = np.asarray(samples)\n labels_array = np.asarray(labels)\n #print(\"Total {:d} walking in eating\\n\".format(fileeatingwalk))\n return len(df[\"Filenames\"]), AllSmoothed, samples_array, labels_array\n\n# Reads from designated file. Useful for handedness and grouping. Does not normalize. Normalization is done post-hoc.\ndef loadAllData4(filename, winlength, step, removerest=1, removewalk=0, removebias=1, shx=1, gtperc = 0.5):\n ### Load data, make samples \n\n samples = []\n labels = []\n AllSmoothed = []\n AllIndices = []\n totaleatingrest = 0\n totaleatingwalk = 0\n df = pd.read_csv(filename, names=[\"Filenames\"])\n for x in tqdm(range(len(df[\"Filenames\"]))):\n #for x in tqdm(range(10)):\n fileeatingrest = 0\n fileeatingwalk = 0\n filesamples = []\n filelabels = []\n File_Name = \"/home/apatyk/\" + df[\"Filenames\"][x]\n RawData = loadshmfile(File_Name) #loadfile.loadshmfile(File_Name)\n Smoothed = smooth(RawData) #loadfile.smooth(RawData) \n Normalized = np.empty_like(Smoothed)\n\n if(removebias):\n # Remove acceleration bias\n TREND_WINDOW = 150\n mean = []\n for j in range(3):\n dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()\n dat[:TREND_WINDOW-1] = 0\n mean.append(dat)\n\n mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values\n # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable\n # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.\n\n Smoothed[:,0:3]-=mean2\n del mean2, mean, dat\n \n AllSmoothed.append(np.copy(Smoothed))\n \n if(removerest != 0):\n std = []\n for j in range(6):\n dat = pd.Series(Smoothed[:,j]).rolling(window=15).std(ddof=0)\n dat[:14] = 0\n std.append(dat)\n # Above doesn't center window. 
Left Shift all values to the left by 7 datum (6 sensors)\n std2 = np.roll(np.asarray(std).transpose(), -7*6) \n accstd = np.sum(std2[:,:3], axis=1)\n gyrostd = np.sum(std2[:,-3:], axis=1)\n datrest = (accstd < ACC_THRESH) & (gyrostd < GYRO_THRESH)\n mrest = datrest.copy()\n\n for i in range(8,len(datrest)-7):\n if(datrest[i]==True):\n mrest[i-7:i+8] = True\n \n del dat, datrest, gyrostd, accstd, std2, std\n \n if(removewalk!=0):\n minv = np.zeros((3,1))\n maxv = np.zeros((3,1))\n zerocross = np.zeros((len(Smoothed),1)).astype(int)\n for j in range(3):\n minv[j]=999.9\n maxv[j]=-999.9\n\n for t in range(len(Smoothed)-1):\n for j in range(3):\n if (Smoothed[t][j+3] < minv[j]):\n minv[j]=Smoothed[t][j+3]\n if (Smoothed[t][j+3] > maxv[j]):\n maxv[j]=Smoothed[t][j+3]\n if ((Smoothed[t][j+3] < 0.0) and (Smoothed[t+1][j+3] > 0.0) and (minv[j] < -5.0)):\n zerocross[t]+=(1<<j)\n minv[j]=999.9\n maxv[j]=-999.9\n if ((Smoothed[t][j+3] > 0.0) and (Smoothed[t+1][j+3] < 0.0) and (maxv[j] > 5.0)):\n zerocross[t]+=(1<<(j+3))\n minv[j]=999.9\n maxv[j]=-999.9\n\n zc = [0 if i==0 else 1 for i in zerocross]\n del minv, maxv, zerocross\n \n del RawData\n\n # Identify things as GT\n [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name) #loadfile.loadEvents(File_Name)\n GT = np.zeros((len(Smoothed))).astype(int)\n for i in range(TotalEvents):\n #print(EventStart[i], EventStart[i], type(EventStart[i]))\n GT[EventStart[i]: EventEnd[i]+1] = 1\n\n # Generate labels \n MaxData = len(Smoothed)\n for t in range(0, MaxData, step):\n sample = [x, t, t+winlength]\n label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc)\n #isrest = if(removerest) else 0\n #iswalk = if(removewalk) else 0\n #if(label and isrest):\n if(label and removerest!=0): # Only ignore if in eating\n isrest = int((np.sum(mrest[t:t+winlength])/winlength)>=0.65)\n if(isrest and removerest==1): continue; # Do not consider this sample at all. Comment this if you want to move the sample to non-eating.\n elif(isrest and removerest==2): label = 0;\n else: label = 1 \n if(label and removewalk!=0): # Only ignore if in eating\n iswalk = int((np.sum(zc[t:t+winlength])/winlength)>=0.15)\n if(iswalk and removewalk==1): continue;\n elif(iswalk and removewalk==2): label=0;\n else: label = 1\n# fileeatingwalk+=1\n# continue # Do not append this sample to the dataset\n \n if(t+winlength < MaxData): # Ignore last small window. 
Not ignoring results in a list rather than a numpy array.\n filesamples.append(sample)\n filelabels.append(label)\n\n samples = samples + filesamples\n labels = labels + filelabels\n numsamples = (len(filesamples))\n totaleatingwalk+=fileeatingwalk\n #print(\"Loaded file {}, {} samples from {}\".format(x, numsamples,File_Name), flush=True)\n #print(\"Loaded file {}, {} samples from {}, contains {} rest in eating\".format(x,numsamples,File_Name,fileeatingrest),\n # flush=True)\n\n samples_array = np.asarray(samples)\n labels_array = np.asarray(labels)\n #print(\"Total {:d} walking in eating\\n\".format(fileeatingwalk))\n return len(df[\"Filenames\"]), AllSmoothed, samples_array, labels_array\n\ndef loadSingleData(filenum, winlength, step, removerest=1, removewalk=0, removebias=1, shx=1, gtperc = 0.5):\n ### Load data, make samples \n\n samples = []\n labels = []\n AllSmoothed = []\n AllIndices = []\n totaleatingrest = 0\n totaleatingwalk = 0\n df = pd.read_csv('../common/batch-unix.txt', names=[\"Filenames\"])\n x = filenum\n fileeatingrest = 0\n fileeatingwalk = 0\n filesamples = []\n filelabels = []\n File_Name = \"/home/apatyk/\" + df[\"Filenames\"][x]\n RawData = loadshmfile(File_Name)\n Smoothed = smooth(RawData)\n Normalized = np.empty_like(Smoothed)\n\n if(removebias):\n # Remove acceleration bias\n TREND_WINDOW = 150\n mean = []\n for j in range(3):\n dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()\n dat[:TREND_WINDOW-1] = 0\n mean.append(dat)\n\n mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values\n # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable\n # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.\n\n Smoothed[:,0:3]-=mean2\n del mean2, mean, dat\n\n AllSmoothed.append(np.copy(Smoothed))\n\n if(removerest != 0):\n std = []\n for j in range(6):\n dat = pd.Series(Smoothed[:,j]).rolling(window=15).std(ddof=0)\n dat[:14] = 0\n std.append(dat)\n # Above doesn't center window. 
Left Shift all values to the left by 7 datum (6 sensors)\n std2 = np.roll(np.asarray(std).transpose(), -7*6) \n accstd = np.sum(std2[:,:3], axis=1)\n gyrostd = np.sum(std2[:,-3:], axis=1)\n datrest = (accstd < ACC_THRESH) & (gyrostd < GYRO_THRESH)\n mrest = datrest.copy()\n\n for i in range(8,len(datrest)-7):\n if(datrest[i]==True):\n mrest[i-7:i+8] = True\n\n del dat, datrest, gyrostd, accstd, std2, std\n\n if(removewalk!=0):\n minv = np.zeros((3,1))\n maxv = np.zeros((3,1))\n zerocross = np.zeros((len(Smoothed),1)).astype(int)\n for j in range(3):\n minv[j]=999.9\n maxv[j]=-999.9\n\n for t in range(len(Smoothed)-1):\n for j in range(3):\n if (Smoothed[t][j+3] < minv[j]):\n minv[j]=Smoothed[t][j+3]\n if (Smoothed[t][j+3] > maxv[j]):\n maxv[j]=Smoothed[t][j+3]\n if ((Smoothed[t][j+3] < 0.0) and (Smoothed[t+1][j+3] > 0.0) and (minv[j] < -5.0)):\n zerocross[t]+=(1<<j)\n minv[j]=999.9\n maxv[j]=-999.9\n if ((Smoothed[t][j+3] > 0.0) and (Smoothed[t+1][j+3] < 0.0) and (maxv[j] > 5.0)):\n zerocross[t]+=(1<<(j+3))\n minv[j]=999.9\n maxv[j]=-999.9\n\n zc = [0 if i==0 else 1 for i in zerocross]\n del minv, maxv, zerocross\n \n del RawData\n\n # Identify things as GT\n [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name) #loadfile.loadEvents(File_Name)\n GT = np.zeros((len(Smoothed))).astype(int)\n for i in range(TotalEvents):\n #print(EventStart[i], EventStart[i], type(EventStart[i]))\n GT[EventStart[i]: EventEnd[i]+1] = 1\n\n # Generate labels \n MaxData = len(Smoothed)\n for t in range(0, MaxData, step):\n sample = [x, t, t+winlength]\n label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc)\n #isrest = if(removerest) else 0\n #iswalk = if(removewalk) else 0\n #if(label and isrest):\n if(label and removerest!=0): # Only ignore if in eating\n isrest = int((np.sum(mrest[t:t+winlength])/winlength)>=0.65)\n if(isrest and removerest==1): continue; # Do not consider this sample at all. Comment this if you want to move the sample to non-eating.\n elif(isrest and removerest==2): label = 0;\n else: label = 1 \n if(label and removewalk!=0): # Only ignore if in eating\n iswalk = int((np.sum(zc[t:t+winlength])/winlength)>=0.15)\n if(iswalk and removewalk==1): continue;\n elif(iswalk and removewalk==2): label=0;\n else: label = 1\n# fileeatingwalk+=1\n# continue # Do not append this sample to the dataset\n \n if(t+winlength < MaxData): # Ignore last small window. 
Not ignoring results in a list rather than a numpy array.\n filesamples.append(sample)\n filelabels.append(label)\n\n samples = samples + filesamples\n labels = labels + filelabels\n numsamples = (len(filesamples))\n totaleatingwalk+=fileeatingwalk\n #print(\"Loaded file {}, {} samples from {}\".format(x, numsamples,File_Name), flush=True)\n #print(\"Loaded file {}, {} samples from {}, contains {} rest in eating\".format(x,numsamples,File_Name,fileeatingrest),\n # flush=True)\n\n samples_array = np.asarray(samples)\n labels_array = np.asarray(labels)\n #print(\"Total {:d} walking in eating\\n\".format(fileeatingwalk))\n return len(df[\"Filenames\"]), AllSmoothed, samples_array, labels_array\n\n# Global Dataset Normalization\ndef globalZscoreNormalize(AllSmoothed, meanvals, stdvals):\n \n AllNormalized = []\n \n for x in range(len(AllSmoothed)):\n Smoothed = AllSmoothed[x]\n Normalized = np.empty_like(Smoothed)\n # Normalize\n for i in range(6):\n Normalized[:,i] = (Smoothed[:,i] - meanvals[i]) / stdvals[i]\n \n # Stick this Normalized data to the Full Array\n AllNormalized.append(np.copy(Normalized))\n \n return AllNormalized\n\n# load from a specific file\ndef LoadNormalizedDataOnly2(filename, removebias=1):\n ### Load data, make samples \n\n meanvals = [-0.012359981 , -0.0051663737, 0.011612018, 0.05796114, 0.1477952, -0.034395125]\n stdvals = [0.05756385, 0.040893298, 0.043825723, 17.199743, 15.311142, 21.229317]\n\n AllNormalized = []\n AllIndices = []\n df = pd.read_csv(filename, names=[\"Filenames\"])\n for x in tqdm(range(len(df[\"Filenames\"]))):\n File_Name = \"/home/apatyk/\" + df[\"Filenames\"][x]\n RawData = loadshmfile(File_Name) #loadfile.loadshmfile(File_Name)\n Smoothed = smooth(RawData) #loadfile.smooth(RawData) \n Normalized = np.empty_like(Smoothed)\n\n if(removebias):\n # Remove acceleration bias\n TREND_WINDOW = 150\n mean = []\n for j in range(3):\n dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()\n dat[:TREND_WINDOW-1] = 0\n mean.append(dat)\n\n mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values\n # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable\n # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.\n\n Smoothed[:,0:3]-=mean2\n del mean2, mean, dat\n \n # Normalize now\n for i in range(6):\n Normalized[:,i] = (Smoothed[:,i] - meanvals[i]) / stdvals[i]\n # Stick this Normalized data to the Full Array\n AllNormalized.append(np.copy(Normalized))\n return df[\"Filenames\"], AllNormalized\n\n# Reads from designated file. Does not normalize. Normalization is done post-hoc. 
GT labels generated from center of window\ndef loadAllDataTesting(filename, winlength, step, removebias=1, gtperc = 0.5):\n ### Load data, make samples \n\n samples = []\n labels = []\n AllSmoothed = []\n AllIndices = []\n df = pd.read_csv(filename, names=[\"Filenames\"])\n for x in tqdm(range(len(df[\"Filenames\"]))):\n filesamples = []\n filelabels = []\n File_Name = \"/home/apatyk/\" + df[\"Filenames\"][x]\n RawData = loadshmfile(File_Name)\n Smoothed = smooth(RawData) \n Normalized = np.empty_like(Smoothed)\n\n if(removebias):\n # Remove acceleration bias\n TREND_WINDOW = 150\n mean = []\n for j in range(3):\n dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()\n dat[:TREND_WINDOW-1] = 0\n mean.append(dat)\n\n mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values\n # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable\n # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.\n\n Smoothed[:,0:3]-=mean2\n del mean2, mean, dat\n \n AllSmoothed.append(np.copy(Smoothed))\n del RawData\n\n # Identify things as GT\n [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name)\n GT = np.zeros((len(Smoothed))).astype(int)\n for i in range(TotalEvents):\n GT[EventStart[i]: EventEnd[i]+1] = 1\n\n # Generate labels \n MaxData = len(Smoothed)\n for t in range(0, MaxData, step):\n if(t+winlength < MaxData): # Ignore last small window.\n sample = [x, t, t+winlength]\n #label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc) # majority vote of window (>50%)\n label = GT[t+(winlength // 2)] # middle point of window\n \n filesamples.append(sample)\n filelabels.append(label)\n\n samples = samples + filesamples\n labels = labels + filelabels\n numsamples = (len(filesamples))\n\n samples_array = np.asarray(samples)\n labels_array = np.asarray(labels)\n return AllSmoothed, samples_array, labels_array" }, { "alpha_fraction": 0.6692501902580261, "alphanum_fraction": 0.6802451610565186, "avg_line_length": 41.67692184448242, "blob_id": "fda47ecdd294ad4463ca3b030dc3d3bcf012f59b", "content_id": "816cd3fd220358459c50a032064b208ae05cf43c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5548, "license_type": "no_license", "max_line_length": 117, "num_lines": 130, "path": "/DailyPatternClassifier/TrainDailyPatternRNN.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "# Adam Patyk\n# Clemson University\n# MS Thesis: Daily Pattern Classifier\n# Summer 2021\n\n# TrainDailyPatternRNN.py\n# Purpose: Trains daily pattern classifiers for k-fold cross validation\n# Used with TestDailyPatternRNN for evaluation\n# Usage: python TrainDailyPatternRNN.py <batch_size> <num_recurrent_units> <num_training_epochs>\n\nimport sys\nimport os\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences \nfrom sklearn.model_selection import KFold\n\nsys.path.append('../') # for .py files in ../common/\nimport common.testing as testing\n\nif len(sys.argv) != 4:\n sys.exit(\"Usage: python TrainDailyPatternRNN.py <batch_size> <num_recurrent_units> <num_training_epochs>\") \n\n# prepare for GPU workflow\ngpus = tf.config.list_physical_devices('GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nlogical_gpus = tf.config.list_logical_devices('GPU')\n# ignore extraneous 
warnings\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\nseed = 42\nrandom.seed(seed)\nnp.random.seed(seed)\ntf.random.set_seed(seed)\n\nlen_threshold = 850\nk = 5\nepochs = int(sys.argv[3]) #50\nbatch_size = int(sys.argv[1]) #64\nnum_units = int(sys.argv[2]) #16\nnum_subjects = 354\nn_timesteps = len_threshold\n\n# load numpy arrays from binary .npy files (created from .txt samples in LoadFiles script;\n# the labels bundle is assumed to be named daily-labels.npy, alongside daily-samples/daily-filenames)\nraw_samples = np.load('../GenerateSamples/compressed-samples/daily-samples.npy', allow_pickle=True)\nraw_labels = np.load('../GenerateSamples/compressed-samples/daily-labels.npy', allow_pickle=True)\nall_filenames = np.load('../GenerateSamples/compressed-samples/daily-filenames.npy').astype(int)\noriginal_sample_lengths = np.array([len(sample) for sample in raw_samples])\n\n# pad or truncate data sequences accordingly\nall_samples = pad_sequences(raw_samples, len_threshold, dtype='float64', padding='post', truncating='post', value=-1)\nall_labels = pad_sequences(raw_labels, len_threshold, dtype='int32', padding='post', truncating='post', value=-1)\nprint('Data ready.')\n\n# prepare k-fold cross validation\nkfold = KFold(k, shuffle=True, random_state=seed)\n# randomly shuffle array of indices\nx = range(num_subjects)\nsubjects = np.array(random.sample(x, num_subjects), copy=False)\n\ntotal_TPR, total_TNR, total_F1, total_Prec, total_WAcc = [], [], [], [], []\ntotal_ep_TPR, total_ep_F1, total_ep_FP_TP = [], [], []\n\nprint(f'Training with batch_size = {batch_size}, units = {num_units}')\nfor i, (training_subjects, testing_subjects) in enumerate(kfold.split(subjects)):\n    ### TRAINING\n    print(f'FOLD {i+1}') \n    os.makedirs('models', exist_ok=True)\n    model_path = f'models/daily-pattern-b{batch_size}-u{num_units}-e{epochs}-fold{i+1}'\n    # retrieve only samples/labels corresponding to training fold\n    print('Training...')\n    training_bool = np.isin(all_filenames, training_subjects)\n    training_samples = tf.convert_to_tensor(all_samples[training_bool], np.float32)\n    training_labels = tf.convert_to_tensor(all_labels[training_bool], np.int8)\n    \n    training_samples = tf.reshape(training_samples, (-1, n_timesteps, 1))\n    training_labels = tf.reshape(training_labels, (-1, n_timesteps, 1))\n    \n    tf.keras.backend.clear_session()\n    mcp_save = tf.keras.callbacks.ModelCheckpoint(model_path, save_best_only=True, monitor='accuracy')\n\n    # define model\n    model = tf.keras.models.Sequential([\n        tf.keras.layers.Masking(mask_value=-1,\n                                input_shape=(n_timesteps, 1)),\n        tf.keras.layers.Bidirectional(\n            tf.keras.layers.GRU(units=num_units, \n                                return_sequences=True,\n                                kernel_initializer='glorot_normal', # Xavier normal initialization\n                                bias_initializer='zeros'),\n            merge_mode='sum'\n        ),\n        tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1, activation='sigmoid'))\n    ])\n\n    model.compile(optimizer='adam',\n                  loss='binary_crossentropy',\n                  metrics=['accuracy'])\n\n    history = model.fit(x=training_samples, y=training_labels,\n                        epochs=epochs, batch_size=batch_size, verbose=2,\n                        callbacks=[mcp_save])\n    \n    ### TESTING\n    print('Saving...')\n\n    # retrieve only samples/labels corresponding to testing fold\n    testing_bool = np.isin(all_filenames, testing_subjects)\n    testing_samples = tf.convert_to_tensor(all_samples[testing_bool], np.float32)\n    testing_labels = tf.convert_to_tensor(all_labels[testing_bool], np.int8)\n    testing_sample_lengths = original_sample_lengths[testing_bool]\n    \n    testing_samples = tf.reshape(testing_samples, (-1, n_timesteps, 1))\n    testing_labels = tf.reshape(testing_labels, (-1, n_timesteps, 
1))\n \n # inference for all testing data using best model from training\n model = tf.keras.models.load_model(model_path)\n testing_probs = model.predict(testing_samples, batch_size=4096)\n \n # save data for post-hoc evaluation\n os.makedirs('testing', exist_ok=True)\n np.save(f'testing/testing_lengths_{epochs}epochs_fold{i+1}.npy', testing_sample_lengths)\n np.save(f'testing/testing_probs_{epochs}epochs_fold{i+1}.npy', testing_probs)\n np.save(f'testing/testing_samples_{epochs}epochs_fold{i+1}.npy', tf.squeeze(testing_samples).numpy())\n np.save(f'testing/testing_labels_{epochs}epochs_fold{i+1}.npy', tf.squeeze(testing_labels).numpy())\n \n del model\n print(\"*****************************************************************\")\n" }, { "alpha_fraction": 0.6657661199569702, "alphanum_fraction": 0.6846777200698853, "avg_line_length": 38.25757598876953, "blob_id": "a7a487ba44b003a8bd8a6c5490fe86117c84577c", "content_id": "8d055b4947ce1220be43123be150fc39f7d4fd66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2591, "license_type": "no_license", "max_line_length": 127, "num_lines": 66, "path": "/common/training.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nimport tensorflow as tf\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Conv1D, GlobalAveragePooling1D\n\nimport tensorflow.keras.backend as kb\n\n### undersamples and trains model from samples and labels provided\ndef trainModel(training_samples, training_labels, data, winlength, epochs, modelpath):\n # undersample the training dataset\n eating_indices = [i for i, e in enumerate(training_labels) if e >= 0.5]\n noneating_indices = [i for i, e in enumerate(training_labels) if e < 0.5]\n undersampled_noneating_indices = random.sample(noneating_indices, len(eating_indices))\n undersampled_balanced_indices = eating_indices + undersampled_noneating_indices\n shuffled_undersampled_balanced_indices = undersampled_balanced_indices.copy()\n random.shuffle(shuffled_undersampled_balanced_indices)\n\n axdata = []\n aydata = []\n\n for i in shuffled_undersampled_balanced_indices:\n f = training_samples[i,0]\n t1 = training_samples[i,1]\n t2 = training_samples[i,2]\n sample = data[f][t1:t2]\n label = training_labels[i]\n axdata.append(sample)\n aydata.append(label)\n\n balanced_data = np.array(axdata, copy=True)\n balanced_labels = np.array(aydata, copy=True)\n del axdata\n del aydata\n \n print(\"Training on {:d} samples of length {:d}\".format(len(shuffled_undersampled_balanced_indices), len(balanced_data[0])))\n\n tf.keras.backend.clear_session()\n\n # use multiple GPUs\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n mcp_save = keras.callbacks.ModelCheckpoint(modelpath, save_best_only=True, monitor='accuracy')\n\n model = Sequential()\n model.add(Conv1D(10, 44, strides=2,activation='relu', input_shape=(winlength, 6), name='input_layer'))\n model.add(Conv1D(10, 20, strides=2, activation='relu', kernel_regularizer=keras.regularizers.l1(0.01)))\n model.add(Conv1D(10, 4, strides=2, activation='relu', kernel_regularizer=keras.regularizers.l1(0.01)))\n model.add(GlobalAveragePooling1D())\n model.add(Dense(200, activation='relu'))\n model.add(Dense(1, activation='sigmoid', name='output_layer'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\n H = model.fit(x=balanced_data, y=balanced_labels,\n 
epochs=epochs, batch_size=256, verbose=0,\n callbacks=[mcp_save])\n \n del balanced_data\n del balanced_labels\n \n return H, model\n" }, { "alpha_fraction": 0.6077498197555542, "alphanum_fraction": 0.6206662058830261, "avg_line_length": 41.03809356689453, "blob_id": "c8911a8511dbc7b585ee981b58ab9fa1cb63337e", "content_id": "d0da1f74609e8f5e4f11d65b1892968d1881b870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4413, "license_type": "no_license", "max_line_length": 174, "num_lines": 105, "path": "/DailyPatternClassifier/TestDailyPatternRNN.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "# Adam Patyk\n# Clemson University\n# MS Thesis: Daily Pattern Classifier\n# Summer 2021\n\n# TestDailyPatternRNN.py\n# Purpose: Evaluate time and episode metrics of daily pattern classifier for k-fold cross validation\n# Usage: python TestDailyPatternRNN.py <threshold_val_start> <threshold_val_end> <threshold_val_step> <num_epochs>\n\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics\nfrom tqdm import tqdm\nfrom datetime import datetime\n\nsys.path.append('../') # for .py files in ../common/\nimport common.testing as testing\n\nif len(sys.argv) != 5:\n sys.exit(\"Usage: python TestDailyPatternRNN.py <threshold_start> <threshold_end> <threshold_step> <num_epochs>\") \n\nthresholds = np.arange(float(sys.argv[1]), float(sys.argv[2]),float(sys.argv[3]))\nk = 5\nepochs = int(sys.argv[4])\n\nresults = []\nstart_time = datetime.now()\n\nfor T in thresholds:\n print(f'T = {T}')\n total_TPR, total_TNR, total_F1, total_Prec, total_WAcc = [], [], [], [], []\n total_ep_TPR, total_ep_F1, total_ep_FP_TP = [], [], []\n\n for f in range(k):\n print(f'Fold {f+1}', flush=True)\n # read saved data from DailyPatternRNN scripts\n testing_sample_lengths = np.load(f'testing/testing_lengths_{epochs}epochs_fold{f+1}.npy')\n testing_probs = np.load(f'testing/testing_probs_{epochs}epochs_fold{f+1}.npy')\n testing_labels = np.load(f'testing/testing_labels_{epochs}epochs_fold{f+1}.npy')\n \n total_TP, total_FP, total_TN, total_FN = 0, 0, 0, 0\n total_ep_TP, total_ep_FP, total_ep_FN = 0, 0, 0\n\n # get episode metrics on testing dataset\n for i in tqdm(range(len(testing_labels))):\n probs = testing_probs[i,:testing_sample_lengths[i]]\n gt_labels = testing_labels[i,:testing_sample_lengths[i]]\n # thresholding segmentation\n h_results = testing.single_threshold(probs, gt_labels, winmin=6, stepsec=100, threshold=T)\n # time-based metrics\n TN, FP, FN, TP = sklearn.metrics.confusion_matrix(gt_labels, h_results['predictions'][0], labels=[0,1]).ravel()\n total_TP += TP\n total_FP += FP\n total_TN += TN\n total_FN += FN\n # episode-based metrics\n ep_TP, ep_FP, ep_FN = testing.calc_episode_metrics(h_results, gt_labels)\n total_ep_TP += ep_TP\n total_ep_FP += ep_FP\n total_ep_FN += ep_FN\n\n # calculate and report overall metrics\n TPR = testing.true_positive_rate(total_TP, total_FN)\n TNR = testing.true_negative_rate(total_TN, total_FP)\n F1 = testing.f1_score(total_TP, total_FP, total_FN)\n Prec = testing.precision(total_TP, total_FP)\n WAcc = testing.weighted_accuracy(total_TP, total_FP, total_TN, total_FN)\n \n ep_TPR = testing.true_positive_rate(total_ep_TP, total_ep_FN)\n ep_F1 = testing.f1_score(total_ep_TP, total_ep_FP, total_ep_FN)\n ep_FP_TP = -1 if total_ep_TP == 0 else total_ep_FP / total_ep_TP\n\n total_TPR.append(TPR)\n total_TNR.append(TNR)\n total_F1.append(F1)\n total_Prec.append(Prec)\n 
total_WAcc.append(WAcc)\n total_ep_TPR.append(ep_TPR)\n total_ep_F1.append(ep_F1)\n total_ep_FP_TP.append(ep_FP_TP)\n \n T_results = {'WAcc': np.mean(total_WAcc), 'TPR': np.mean(total_TPR), 'TNR': np.mean(total_TNR), 'F1': np.mean(total_F1), 'Precision': np.mean(total_Prec), \n 'Episode TPR': np.mean(total_ep_TPR), 'Episode F1': np.mean(total_ep_F1), 'Episode FP/TP': np.mean(total_ep_FP_TP)}\n results.append(T_results)\n\n print('AVERAGE:')\n print('--- Time Metrics ---')\n print(f'WAcc: {np.mean(total_WAcc):.3f}\\tTPR: {np.mean(total_TPR):.3f}\\tTNR: {np.mean(total_TNR):.3f}\\tF1: {np.mean(total_F1):.3f}\\tPrecision: {np.mean(total_Prec):.3f}')\n\n print('--- Episode Metrics ---')\n print(f'TPR: {np.mean(total_ep_TPR):.3f}\\tF1: {np.mean(total_ep_F1):.3f}\\tFP/TP: {np.mean(total_ep_FP_TP):.3f}')\n print(\"*****************************************************************\", flush=True)\n \n# prepare .csv file for export\nos.makedirs('results', exist_ok=True)\nresults_df = pd.DataFrame(results)\nresults_df.insert(0, 'Threshold', thresholds)\nresults_df.to_csv(f'results/testing-results-{epochs}epochs.csv', index=False, header=True)\n\nprint('Results saved.')\n\nend_time = datetime.now()\nprint(f'Duration: {end_time - start_time}')" }, { "alpha_fraction": 0.5495963096618652, "alphanum_fraction": 0.5988081693649292, "avg_line_length": 40.624000549316406, "blob_id": "daf1e2e288d0b565dcc6ddb35933981706102029", "content_id": "42f9afc5dafb19d2d0a100d404c2fad27c31a583", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5202, "license_type": "no_license", "max_line_length": 238, "num_lines": 125, "path": "/GenerateSamples/GenerateSamples.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "# Adam Patyk\n# Clemson University\n# MS Thesis: Daily Pattern Classifier\n# Summer 2021\n\n# GenerateSamples.py\n# Purpose: Generates daily samples for data augmentation\n# Usage: python GenerateSamples.py <window_length_minutes>\n\nimport sys\nimport os\nimport tensorflow as tf # updated for TensorFlow 2.2.0\nimport numpy as np\nimport math\nfrom datetime import datetime\nfrom tqdm import tqdm\n\nsys.path.append('../') # for .py files in ../common/\nimport common.loadfile as loadfile\nimport common.training as training\nimport common.testing as testing\n\nif len(sys.argv) != 2:\n sys.exit(\"Usage: python GenerateSamples.py <window_length_in_min>\") \n\n# prepare TensorFlow for GPU usage\ngpus = tf.config.experimental.list_physical_devices('GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\nlogical_gpus = tf.config.experimental.list_logical_devices('GPU')\nprint(len(gpus), \"Physical GPU,\", len(logical_gpus), \"Logical GPUs\")\n\nepochs = 30\nwin_min = int(sys.argv[1]) #6\ntrain_stride_sec = 15\ntest_stride_sec = 100\n\nwin_len = int(win_min * 60 * 15)\ntrain_step = int(train_stride_sec * 15)\ntest_step = int(test_stride_sec * 15)\nstart_time = datetime.now()\n\nsave_dir = 'samples/'\nos.makedirs(save_dir, exist_ok=True)\n\narr = [\"echo -n 'PBS: node is '; cat $PBS_NODEFILE\",\\\n \"echo PBS: job identifier is $PBS_JOBID\",\\\n \"echo PBS: job name is $PBS_JOBNAME\"]\n\n[os.system(cmd) for cmd in arr]\n\nprint(\"*****************************************************************\", flush=True)\nprint(\"Execution Started at \" + start_time.strftime(\"%m/%d/%Y, %H:%M:%S\"), flush=True)\nprint(\"Window Length: {:.2f} min ({:d} data)\\tTraining Slide: {:d} sec ({:d} data)\\tTesting Slide: {:d} sec 
({:d} data)\\tEpochs: {:d}\".format(win_min, win_len, train_stride_sec, train_step, test_stride_sec, test_step, epochs), flush=True)\n\n# load the dataset for training with majority vote GT labeling for windows \nnum_files, all_training_data, training_samples_array, training_labels_array = loadfile.loadAllData3(win_len,\n                                                                                                    train_step,\n                                                                                                    removerest=0,\n                                                                                                    removewalk=0,\n                                                                                                    removebias=1)\n\n# load the dataset for testing with a different stride and GT labeling (center point)\nall_testing_data, testing_samples_array, testing_labels_array = loadfile.loadAllDataTesting('../common/batch-unix.txt', \n                                                                                            win_len, \n                                                                                            test_step, \n                                                                                            removebias=1)\n\nprint(\"Data loaded.\", flush=True)\n\n# normalize the datasets\nshimmer_global_mean = [-0.012359981,-0.0051663737,0.011612018,\n                       0.05796114,0.1477952,-0.034395125 ]\n\nshimmer_global_stddev = [0.05756385,0.040893298,0.043825723, \n                         17.199743,15.311142,21.229317 ]\n\nshimmer_trended_mean = [-0.000002,-0.000002,-0.000000,\n                        0.058144,0.147621,-0.033260 ]\n\nshimmer_trended_stddev = [0.037592,0.034135,0.032263,\n                          17.209038,15.321441,21.242532 ]\n\nall_zero_means = [0,0,0,0,0,0]\n\nmean_vals = all_zero_means\nstd_vals = shimmer_trended_stddev\n\nall_training_normalized = loadfile.globalZscoreNormalize(all_training_data, mean_vals, std_vals)\nall_testing_normalized = loadfile.globalZscoreNormalize(all_testing_data, mean_vals, std_vals)\n\nprint(\"Data normalized.\")\n\n# generate training samples from trained model\nnum_samples = 200000\nsubjects = [*range(num_files)]\nnum_subjects = len(subjects)\nnum_iterations = math.ceil(num_samples / num_subjects)\n\nprint(f'Generating training samples ({num_subjects} subjects)', flush=True)\n\nfor i in tqdm(range(num_iterations)):\n    start_time = datetime.now()\n    \n    # train model on all training data\n    H, model = training.trainModel(training_samples_array, training_labels_array, all_training_normalized, win_len, epochs, save_dir + f'tmp_{win_min}min.h5')\n    \n    # output P(E) and GT to text file for each recording using the trained model\n    for s in subjects:\n        subject_bool = np.isin(testing_samples_array[:,0], s)\n        s_samples = testing_samples_array[subject_bool]\n        s_labels = testing_labels_array[subject_bool]\n        raw_samples, gt_labels = testing.get_raw_data(s_samples, s_labels, all_testing_normalized)\n        if raw_samples.size != 0:\n            probs = model.predict(raw_samples, batch_size=1024)\n            result = np.hstack((np.reshape(gt_labels,(1,-1)).T, probs))\n            np.savetxt(save_dir + f'W{win_min}_P{s:03.0f}_I{i:03.0f}.txt', result)\n    \n    tf.keras.backend.clear_session()\n    del model\n    \n    end_time = datetime.now()\n    print(f'Iteration Duration: {end_time - start_time}', flush=True)\n\nprint(f'{num_iterations * num_subjects} daily sample files saved.')" }, { "alpha_fraction": 0.6182152628898621, "alphanum_fraction": 0.7166513204574585, "avg_line_length": 30.05714225769043, "blob_id": "bf941291dbb866ae4b70bbbe61750d7850872f88", "content_id": "ad1ff16deabe5bf4df529709b125a0659a71fce2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1087, "license_type": "no_license", "max_line_length": 131, "num_lines": 35, "path": "/DailyPatternClassifier/SubmitTrainRNNJob.py", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "import os\nimport numpy as np\n\n#PBS -l select=1:ncpus=28:mem=120gb:ngpus=2:gpu_model=p100:interconnect=fdr,walltime=4:00:00\n#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=4:00:00\n\nbatch_size = 64\nnum_units = 16\nepochs = 
50\n\npbsfile = open(\"job.pbs\", \"w\")\n\npbsfile.write(\"\"\"#PBS -u apatyk\n#PBS -N DP_E{:d}\n#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=16:00:00\n#PBS -m ae\n#PBS -o /home/apatyk/Research/NewModels/results-gru-b{:d}-u{:d}-e{:d}.txt\n#PBS -j oe\n\n\"\"\".format(epochs,batch_size,num_units,epochs))\n\npbsfile.write(\"\"\"ulimit -c 0\nsource /software/spackages/linux-centos8-x86_64/gcc-8.3.1/anaconda3-2019.10-v5cuhr6keyz5ryxcwvv2jkzfj2gwrj4a/etc/profile.d/conda.sh\nmodule load cuda/10.2.89-gcc/8.3.1 cudnn/8.0.0.180-10.2-linux-x64-gcc/8.3.1 anaconda3/2019.10-gcc/8.3.1\nconda activate tf_env\ncd ~/Research/NewModels/ \npython TrainDailyPatternRNN.py {:d} {:d} {:d}\n\"\"\".format(batch_size,num_units,epochs))\npbsfile.write(\"\"\"if [ $? -ne 0 ]; then\nrm core.*\nfi\nexit\"\"\")\n\npbsfile.close()\nos.system(\"qsub job.pbs\")\n" }, { "alpha_fraction": 0.7456232905387878, "alphanum_fraction": 0.7563804984092712, "avg_line_length": 90.17308044433594, "blob_id": "efaf473a2bb81d5d580957568a46114c9f664103", "content_id": "00aef7d30528098f9b1f683763788335b8f298f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4741, "license_type": "no_license", "max_line_length": 455, "num_lines": 52, "path": "/README.md", "repo_name": "apatyk/Daily-Pattern-Classifier", "src_encoding": "UTF-8", "text": "# Daily Pattern Classifier\n---\n\n## Overview\n\nThis code is from M.S. research in applying deep learning and recurrent neural networks to eating detection. The M.S. thesis that accompanies this research can be found here, [\"Detecting eating episodes from daily patterns of wrist motion using recurrent neural networks\"](http://cecas.clemson.edu/~ahoover/theses/patyk-thesis.pdf). 
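The pipeline first trains a window-level classifier that outputs a per-window probability of eating, then trains a recurrent network on the resulting day-long probability sequences to detect eating episodes. 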
The Clemson All-Day (CAD) dataset used in this project is [publicly available](http://cecas.clemson.edu/~ahoover/allday/).\n\n## Requirements\n\nThis project requires the following:\n- Python 3.8.3\n- [TensorFlow 2.2.0](https://www.tensorflow.org/versions/r2.2/api_docs/python/tf)\n- NumPy 1.19.5\n- Pandas 1.1.1\n- [tqdm 4.61.2](https://tqdm.github.io)\n- scikit-learn 0.23.2\n\n## Repository Structure\n\n common/ // directory for scripts and text files used by other programs\n batch-unix.txt // filenames for all recordings in CAD dataset\n meals-shimmer.txt // information for all meals in CAD dataset\n loadfile.py // contains functions for loading CAD dataset data\n testing.py // contains functions for evaluation\n training.py // contains functions for training window-based classifier (Sharma 2020)\n\n GenerateSamples/ // code to generate daily samples for training daily pattern classifier\n GenerateSamples.ipynb // Jupyter notebook for generating daily samples\n GenerateSamples.py // Python program for generating daily samples\n LoadFiles.ipynb // loads many daily sample text files and saves as combined .npy files\n SubmitGenSamplesJob.py // script to run GenerateSamples on Palmetto cluster as a PBS job\n\n DailyPatternClassifier/ // code to train and evaluate the daily pattern classifier\n DailyPatternRNN.ipynb // performs k-fold cross validation for training AND testing (Jupyter notebook)\n TrainDailyPatternRNN.py // performs training for k-fold cross validation\n TestDailyPatternRNN.py // evaluates time and episode metrics post-hoc for k-fold cross validation\n SubmitTrainRNNJob.py // script to run TrainDailyPatternRNN on Palmetto cluster as a PBS job\n \n## Code Description\n\nCode can be found at [https://github.com/apatyk/Daily-Pattern-Classifier](https://github.com/apatyk/Daily-Pattern-Classifier). Each program or script mentioned above is briefly described here.\n\n1. **`loadfile.py`:** This Python file contains a variety of functions used to load the Shimmer data from the Clemson All-Day (CAD) dataset. This code is adapted from previous work (Sharma 2020).\n2. **`testing.py`:** This Python file contains several functions and their related auxiliary functions for testing the window-based classifier from Sharma as well as the daily pattern classifier.\n3. **`training.py`:** This Python file encapsulates training code for the window-based classifier.\n4. **`GenerateSamples.ipynb`/`.py`:** These programs perform data augmentation and generate a collection of daily samples needed to train the daily pattern classifier.\n5. **`SubmitGenSamplesJob.py`:** This Python script creates a PBS script to submit a GenerateSamples job to the Palmetto cluster. This is the preferred use of these scripts as GenerateSamples takes more than 24 hours to run to generate 200,000 samples for a 6 minute window.\n6. **`LoadFiles.ipynb`:** This Jupyter notebook loads all of the daily sample text files generated by GenerateSamples and saves them collectively to binary NumPy files `.npy`. Data is divided into files for samples, labels, and filenames. Cells are included in the notebook to load and export a single directory or multiple directories (in parallel). \n7. **`TrainDailyPatternRNN.py`:** This Python program trains daily pattern classifiers for k-fold cross validation using bundled daily sample arrays from LoadFiles. The number of training epochs, batch size, and number of recurrent units in the model architecture can be defined in command line arguments. 
This is utilized by the SubmitTrainRNNJob script.\n8. **`TestDailyPatternRNN.py`:** This Python program evaluates time and episode metrics post-hoc for k-fold cross validation and saves the results to a CSV file. Varying thresholds can be used for thresholding eating episode segmentation.\n9. **`SubmitTrainRNNJob.py`:** This Python script creates a PBS script to submit a TrainDailyPatternRNN job to the Palmetto cluster.\n10. **`DailyPatternRNN.ipynb`:** This Jupyter notebook trains AND tests daily pattern classifiers with k-fold cross validation. This notebook performs the functions of `TrainDailyPatternRNN` and `TestDailyPatternRNN` together at a fixed post-processing threshold.\n" } ]
9
inzaghian/anzhu
https://github.com/inzaghian/anzhu
b1121c79d2ec4054e04bf648962a59c6916e63b4
41a202a8f2d0e9809265c17382fef1ba3e04c8e2
7fe4e230b0fde61b3b63a7242b26f8a97b623db5
refs/heads/master
2021-01-21T13:36:58.495536
2019-08-16T06:01:28
2019-08-16T06:01:28
33,847,530
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5981996655464172, "alphanum_fraction": 0.6405482888221741, "avg_line_length": 47.869998931884766, "blob_id": "cd75d7850b6aaee753243204458f89f792a7ed2e", "content_id": "cc589fd56392d3c2c1ca35e7f5628f7b870093d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4936, "license_type": "no_license", "max_line_length": 70, "num_lines": 100, "path": "/ui/comset.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'comset.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_comsetform(object):\n def setupUi(self, comsetform):\n comsetform.setObjectName(\"comsetform\")\n comsetform.resize(213, 193)\n self.label = QtWidgets.QLabel(comsetform)\n self.label.setGeometry(QtCore.QRect(24, 26, 54, 16))\n self.label.setObjectName(\"label\")\n self.cb_com = QtWidgets.QComboBox(comsetform)\n self.cb_com.setGeometry(QtCore.QRect(90, 26, 101, 20))\n self.cb_com.setObjectName(\"cb_com\")\n self.cb_bsp = QtWidgets.QComboBox(comsetform)\n self.cb_bsp.setGeometry(QtCore.QRect(90, 52, 101, 20))\n self.cb_bsp.setFrame(True)\n self.cb_bsp.setObjectName(\"cb_bsp\")\n self.cb_bsp.addItem(\"\")\n self.cb_bsp.addItem(\"\")\n self.cb_bsp.addItem(\"\")\n self.cb_bsp.addItem(\"\")\n self.cb_bsp.addItem(\"\")\n self.cb_bsp.addItem(\"\")\n self.label_2 = QtWidgets.QLabel(comsetform)\n self.label_2.setGeometry(QtCore.QRect(24, 52, 54, 16))\n self.label_2.setObjectName(\"label_2\")\n self.cb_data = QtWidgets.QComboBox(comsetform)\n self.cb_data.setGeometry(QtCore.QRect(90, 78, 101, 20))\n self.cb_data.setObjectName(\"cb_data\")\n self.cb_data.addItem(\"\")\n self.cb_data.addItem(\"\")\n self.label_3 = QtWidgets.QLabel(comsetform)\n self.label_3.setGeometry(QtCore.QRect(24, 78, 54, 16))\n self.label_3.setObjectName(\"label_3\")\n self.cb_stop = QtWidgets.QComboBox(comsetform)\n self.cb_stop.setGeometry(QtCore.QRect(90, 130, 101, 20))\n self.cb_stop.setObjectName(\"cb_stop\")\n self.cb_stop.addItem(\"\")\n self.cb_stop.addItem(\"\")\n self.cb_stop.addItem(\"\")\n self.label_4 = QtWidgets.QLabel(comsetform)\n self.label_4.setGeometry(QtCore.QRect(24, 130, 54, 16))\n self.label_4.setObjectName(\"label_4\")\n self.cb_p = QtWidgets.QComboBox(comsetform)\n self.cb_p.setGeometry(QtCore.QRect(90, 104, 101, 20))\n self.cb_p.setObjectName(\"cb_p\")\n self.cb_p.addItem(\"\")\n self.cb_p.addItem(\"\")\n self.cb_p.addItem(\"\")\n self.label_5 = QtWidgets.QLabel(comsetform)\n self.label_5.setGeometry(QtCore.QRect(24, 104, 54, 16))\n self.label_5.setObjectName(\"label_5\")\n self.btn_default = QtWidgets.QPushButton(comsetform)\n self.btn_default.setGeometry(QtCore.QRect(84, 160, 50, 23))\n self.btn_default.setObjectName(\"btn_default\")\n self.btn_save = QtWidgets.QPushButton(comsetform)\n self.btn_save.setGeometry(QtCore.QRect(24, 160, 50, 23))\n self.btn_save.setObjectName(\"btn_save\")\n self.btn_esc = QtWidgets.QPushButton(comsetform)\n self.btn_esc.setGeometry(QtCore.QRect(144, 160, 50, 23))\n self.btn_esc.setObjectName(\"btn_esc\")\n\n self.retranslateUi(comsetform)\n self.cb_bsp.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(comsetform)\n\n def retranslateUi(self, comsetform):\n _translate = QtCore.QCoreApplication.translate\n comsetform.setWindowTitle(_translate(\"comsetform\", \"串口设置\"))\n 
self.label.setText(_translate(\"comsetform\", \"串口\"))\n self.cb_bsp.setCurrentText(_translate(\"comsetform\", \"115200\"))\n self.cb_bsp.setItemText(0, _translate(\"comsetform\", \"115200\"))\n self.cb_bsp.setItemText(1, _translate(\"comsetform\", \"4800\"))\n self.cb_bsp.setItemText(2, _translate(\"comsetform\", \"9600\"))\n self.cb_bsp.setItemText(3, _translate(\"comsetform\", \"19200\"))\n self.cb_bsp.setItemText(4, _translate(\"comsetform\", \"38400\"))\n self.cb_bsp.setItemText(5, _translate(\"comsetform\", \"57600\"))\n self.label_2.setText(_translate(\"comsetform\", \"波特率\"))\n self.cb_data.setCurrentText(_translate(\"comsetform\", \"8\"))\n self.cb_data.setItemText(0, _translate(\"comsetform\", \"8\"))\n self.cb_data.setItemText(1, _translate(\"comsetform\", \"7\"))\n self.label_3.setText(_translate(\"comsetform\", \"数据位\"))\n self.cb_stop.setItemText(0, _translate(\"comsetform\", \"1\"))\n self.cb_stop.setItemText(1, _translate(\"comsetform\", \"1.5\"))\n self.cb_stop.setItemText(2, _translate(\"comsetform\", \"2\"))\n self.label_4.setText(_translate(\"comsetform\", \"停止位\"))\n self.cb_p.setItemText(0, _translate(\"comsetform\", \"NONE\"))\n self.cb_p.setItemText(1, _translate(\"comsetform\", \"ODD\"))\n self.cb_p.setItemText(2, _translate(\"comsetform\", \"EVEN\"))\n self.label_5.setText(_translate(\"comsetform\", \"校验位\"))\n self.btn_default.setText(_translate(\"comsetform\", \"默认\"))\n self.btn_save.setText(_translate(\"comsetform\", \"确定\"))\n self.btn_esc.setText(_translate(\"comsetform\", \"取消\"))\n\n" }, { "alpha_fraction": 0.5461200475692749, "alphanum_fraction": 0.5739384889602661, "avg_line_length": 27.5, "blob_id": "2be48ce5b330b6d93f54e695a25328156b936f98", "content_id": "9389af123a571664e500b09852140d03b19d21c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 683, "license_type": "no_license", "max_line_length": 69, "num_lines": 24, "path": "/xmlreadandwrite.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport xml.etree.ElementTree as ET\n#{'com': 'COM1', 'bsp': '115200', 'd': '8', 'p': 'NONE', 's': '1'}\ndef WriteXml(sl):\n root=ET.Element(\"com\")\n for e in sl.keys():\n l=ET.SubElement(root,\"set\")\n l.attrib={'name':e,'value':sl[e]}\n tree = ET.ElementTree(root)\n tree.write(\"setmsg.xml\")\n\ndef ReadXml(spath):\n root=ET.parse(spath)\n p=root.findall('.')\n xmllist={}\n for oneper in p:\n for child in oneper.getchildren():\n xmllist[child.attrib['name']]=child.attrib['value']\n return xmllist\n\n#sl={'com': 'COM1', 'bsp': '115200', 'd': '8', 'p': 'NONE', 's': '1'}\n#WriteXml(sl)\n#spath=\"setmsg.xml\"\n#print(ReadXml(spath))" }, { "alpha_fraction": 0.5596412420272827, "alphanum_fraction": 0.5940209031105042, "avg_line_length": 36.59550476074219, "blob_id": "5b8c57362d4506df9bb8310de15e3868007cb1d0", "content_id": "f980ce9d650635442a968d72fbaf94947bd36c5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3345, "license_type": "no_license", "max_line_length": 125, "num_lines": 89, "path": "/testform.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSignal,QTimer,Qt\nfrom PyQt5.QtGui import QIcon,QImage,QPixmap\nfrom ui.test import Ui_TestForm\nimport cv2\nfrom MyQR import myqr\nimport numpy as np\n\nclass Testwindow(QtWidgets.QWidget):\n\n _signal = pyqtSignal(bytes)\n\n def __init__(self):\n 
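# initialize the widget, attach the generated Ui_TestForm layout, and wire the generate/capture buttons\n        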
super(Testwindow,self).__init__()\n self.new=Ui_TestForm()\n self.new.setupUi(self)\n self.new.btn_generate.clicked.connect(self.QrImageCvt)\n self.new.btn_start_captrue.clicked.connect(self.startCapture)\n #self.startCapture()\n #self.QrImageCvt()\n\n def QrImageCvt(self):\n head = 'DQY'\n model = '19016'\n date = '1908'\n factory = '01'\n pin=self.new.lineEdit.text()\n cnt = self.new.lineEditCnt.text()\n cnt = int(cnt)\n pin = int(pin)\n for i in range(0, cnt):\n serial = str(\"%04d\"%(pin+i))\n name = head + model + date + factory + serial\n myqr.run(words=name,save_name='./Icon/qr.png')\n p2 = cv2.imread('./Icon/qr.png')\n p1 = cv2.imread('./Icon/pic.jpg')\n image_height, image_width, image_depth = p1.shape\n x = 65\n y = 50\n w = 175\n p2 = cv2.resize(p2,(w,w))\n p1[image_height-w-y:image_height-y,image_width-w-x:image_width-x] = p2\n p1 = cv2.putText(p1, 'SN:'+name, (image_width-350,image_height-50), cv2.FONT_HERSHEY_COMPLEX_SMALL,1.0,(0,0,0),2)\n QIm = cv2.cvtColor(p1, cv2.COLOR_BGR2RGB)\n self.new.label.setGeometry(20, 10, image_width, image_height)\n QIm = QImage(QIm.data, image_width, image_height, image_width * image_depth, QImage.Format_RGB888)\n QPix = QPixmap.fromImage(QIm)\n dx1 = 50\n dx2 = dx1+10\n dy1 = 90\n dy2 = 40\n cv2.imwrite('./Icon/temp/'+name+'.png',p1[dy1:image_height-dy2,dx1:image_width-dx2])\n self.new.label.setPixmap(QPix)\n\n def startCapture(self):\n self.setWindowIcon(QIcon('./Icon/dqy.png'))\n png=QtGui.QPixmap('./Icon/dqy.png')\n self.timer = QTimer()\n self.timer.setTimerType(Qt.TimerType.PreciseTimer)\n self.timer.timeout.connect(self.update)\n self.timer.start(10)\n self.cap = cv2.VideoCapture(0)\n self.image = QImage()\n self.width = 640\n self.height = 480\n self.detector = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')\n \n def update(self):\n ret,frame = self.cap.read()\n height,width,depth = frame.shape\n gray = cv2.cvtColor(frame, code = cv2.COLOR_BGR2GRAY)\n face_zone = self.detector.detectMultiScale(gray,scaleFactor=1.2,minNeighbors = 5)\n for x,y,w,h in face_zone:\n cv2.circle(frame,center = (x+w//2,y+h//2),radius = w//2, color = [0,0,255])\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n self.image = QImage(frame, width,height,3*width,QImage.Format_RGB888)\n pixmap = QPixmap.fromImage(self.image)\n self.new.label.setPixmap(pixmap)\n \nif __name__ == '__main__':\n import sys\n from PyQt5 import QtWidgets\n from uartform import Uartwindow\n\n app = QtWidgets.QApplication(sys.argv)\n uf = Testwindow()\n uf.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.5119959115982056, "alphanum_fraction": 0.5237365961074829, "avg_line_length": 22.321428298950195, "blob_id": "534d284bced7f51ab4100f393b5310b510460800", "content_id": "2835d58693a207b0120dd3775d88bb7e8f2b8d90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1967, "license_type": "no_license", "max_line_length": 71, "num_lines": 84, "path": "/com.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nimport serial\nimport serial.tools.list_ports\n\n# this may be modified by anyone\nclass opencom():\n def __init__(self):\n self.com=serial.Serial()\n\n def initcom(self,comname,bsp=115200,bs=8,s=1,p=serial.PARITY_NONE):\n try:\n self.com.port = comname\n self.com.baudrate = bsp\n self.com.bytesize = bs \n self.com.stopbits = s\n self.com.parity = p\n except Exception as e:\n print(e)\n\n def isopen(self):\n return self.com.isOpen()\n\n def opencom(self):\n try:\n self.com.open()\n except 
Exception as e:\n print(e)\n return self.com.isOpen()\n\n def CloseCom(self):\n if self.com.isOpen():\n self.com.close()\n print(\"串口关闭\")\n\n def Get_ports(self):\n clist=[]\n port_list = list(serial.tools.list_ports.comports())\n if len(port_list)> 0:\n clist=[]\n for e in port_list:\n port_list_0 =list(e)\n port_serial = port_list_0[0]\n clist.append(port_serial)\n return clist\n\n def Get_p(self,p):\n pstate=serial.PARITY_NONE\n if p==\"ODD\":\n pstate=serial.PARITY_ODD\n elif p==\"EVEN\":\n pstate=serial.PARITY_EVEN\n return pstate\n\n def comwritebytes(self,b):\n wlen=self.com.write(b)\n return wlen\n\n def comwritestring(self,b):\n wlen=self.com.write(b.encode(\"utf-8\"))\n return wlen \n\n def HexToString(self,b):\n rdata=\"\"\n for e in b:\n rdata+=hex(e)+\" \"\n rdata=rdata[:-1]\n return rdata\n\n def comreadbytes(self):\n slen=self.com.in_waiting\n sdata=b''\n if slen>0:\n sdata = self.com.read(slen)\n return sdata\n\n\"\"\"\nc1=opencom()\nclist=c1.Get_ports()\nif len(clist)>0:\n comname=clist[0]\n c1.initcom(comname)\n if c1.opencom():\n c1.CloseCom()\n\"\"\"\n" }, { "alpha_fraction": 0.5627284049987793, "alphanum_fraction": 0.5657734274864197, "avg_line_length": 28.339284896850586, "blob_id": "fbba209bea41ac4b95eb5c9586bf450d73aee5ac", "content_id": "dc11948f697586d6b9ae429c84d0a1807265b39e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1642, "license_type": "no_license", "max_line_length": 61, "num_lines": 56, "path": "/comsetform.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtGui import QIcon\nfrom ui.comset import Ui_comsetform\n\nclass Comsetwindow(QtWidgets.QWidget):\n\n    _signal = pyqtSignal(dict)\n\n    def __init__(self):\n        super(Comsetwindow, self).__init__()\n        self.new = Ui_comsetform()\n        self.new.setupUi(self)\n        self.new.btn_save.clicked.connect(self.Get_set)\n\n    def initcom(self, clist):\n        self.new.cb_com.clear()\n        self.new.cb_com.addItems(clist)\n        self.setWindowIcon(QIcon('./Icon/dqy.png'))\n\n    def Get_set(self):\n        sl = {}\n        com = self.new.cb_com.currentText()\n        bsp = self.new.cb_bsp.currentText()\n        d = self.new.cb_data.currentText()\n        p = self.new.cb_p.currentText()\n        s = self.new.cb_stop.currentText()\n        sl = {'com': com, 'bsp': bsp, 'd': d, 'p': p, 's': s}\n        self._signal.emit(sl)\n        self.close()\n    \n    def set_com(self, msg):\n        try:\n            com = msg['com']\n            bsp = msg['bsp']\n            d = msg['d']\n            s = msg['s']\n            p = msg['p']\n            self.new.cb_com.setCurrentText(com)\n            self.new.cb_bsp.setCurrentText(bsp)\n            self.new.cb_data.setCurrentText(d)\n            self.new.cb_p.setCurrentText(p)\n            self.new.cb_stop.setCurrentText(s)\n        except Exception as e:\n            print(e)\n\nif 
__name__ == '__main__':\n import sys\n from PyQt5 import QtWidgets\n from uartform import Uartwindow\n\n app = QtWidgets.QApplication(sys.argv)\n uf = Comsetwindow()\n uf.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.6318359375, "alphanum_fraction": 0.6376953125, "avg_line_length": 25.28205108642578, "blob_id": "4c59600c1f10f0e292bee889b0cce2944b5027c0", "content_id": "1e16a0d943ef09b583a221442f41d62238779476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1024, "license_type": "no_license", "max_line_length": 63, "num_lines": 39, "path": "/pinsetform.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtGui import QIcon\nfrom ui.pinset import Ui_PinSetForm\n\nclass Pinsetwindow(QtWidgets.QWidget):\n\n _signal = pyqtSignal(bytes)\n\n def __init__(self):\n super(Pinsetwindow,self).__init__()\n self.new=Ui_PinSetForm()\n self.new.setupUi(self)\n self.new.lineEdit.returnPressed.connect(self.DataInput)\n self.initcom()\n\n def initcom(self):\n self.setWindowIcon(QIcon('./Icon/dqy.png'))\n png=QtGui.QPixmap('./Icon/dqy.png')\n self.new.label.setPixmap(png)\n\n def DataInput(self):\n data=self.new.lineEdit.text()\n print(data)\n self._signal.emit(data.encode(\"utf-8\"))\n self.new.lineEdit.clear()\n self.close()\n\n \nif __name__ == '__main__':\n import sys\n from PyQt5 import QtWidgets\n from uartform import Uartwindow\n\n app = QtWidgets.QApplication(sys.argv)\n uf = Pinsetwindow()\n uf.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.534778892993927, "alphanum_fraction": 0.5388578772544861, "avg_line_length": 29.749174118041992, "blob_id": "3c87128ecd2c7f04ad2bf44d76ecdf218b3ab18a", "content_id": "f2135220795a52282f74869b9195c68856b0b92b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9462, "license_type": "no_license", "max_line_length": 102, "num_lines": 303, "path": "/uartform.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSignal,QThread,QTimer,Qt\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtGui import QIcon\nfrom ui.uart import Ui_uartform\nfrom comsetform import Comsetwindow\nfrom pinsetform import Pinsetwindow\nfrom testform import Testwindow\nfrom com import opencom\nfrom xmlreadandwrite import WriteXml,ReadXml\nimport time\n\n\nclass Uthread(QThread): \n _signal = pyqtSignal(bytes) \n \n def __init__(self, parent=None): \n super(Uthread, self).__init__()\n\n def initcom(self,com):\n self.com=com\n\n def SetAlive(self,alive):\n self.alive=alive\n \n def run(self):\n while self.alive:\n try:\n sdata=self.com.comreadbytes()\n self._signal.emit(sdata)\n except Exception as e:\n print(e)\n break\n\nclass Uartwindow(QtWidgets.QWidget):\n \n def __init__(self): \n super(Uartwindow,self).__init__() \n self.new = Ui_uartform()\n self.new.setupUi(self)\n self.InitData()\n\n def InitData(self):\n self.cw=Comsetwindow()\n self.PinWin=Pinsetwindow()\n self.PinWin._signal.connect(self.PinSet)\n self.com=opencom()\n self.cw._signal.connect(self.callcw)\n self.TestWin = Testwindow()\n self.new.btn_setcom.clicked.connect(self.ShowCw)\n self.new.btn_search.clicked.connect(self.searchcom)\n #self.new.btn_open.clicked.connect(self.OpneCom)\n self.new.btn_open.clicked.connect(self.btn_opencom)\n 
self.new.btn_send.clicked.connect(self.WriteData)\n self.new.btn_receive.clicked.connect(self.ReadData)\n self.new.btn_clear.clicked.connect(self.ClearMsg)\n self.searchcom()\n #self.new.cb_receive.setChecked(False)\n self.thread = None\n self.rtim=QTimer()\n self.rtim.setTimerType(Qt.TimerType.PreciseTimer)\n self.rtim.timeout.connect(self.callrtim)\n self.logpath=\"./log/\" + str(time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime())) + '_log.txt'\n self.ShowLog(self.logpath)\n self.svaedata=\"\"\n try:\n self.sl=ReadXml('setmsg.xml')\n self.callcw(self.sl,b=1)\n except Exception as e:\n self.ShowLog(str(e))\n self.setWindowIcon(QIcon('./Icon/dqy.png'))\n self.new.btn_help_cmd.clicked.connect(self.HelpCmd)\n self.new.btn_reboot_cmd.clicked.connect(self.RebootCmd)\n self.new.btn_dev_info_cmd.clicked.connect(self.DevInfoCmd)\n self.new.btn_log_on_cmd.clicked.connect(self.LogOnCmd)\n self.new.btn_log_off_cmd.clicked.connect(self.LogOffCmd)\n self.new.btn_pin_cmd.clicked.connect(self.InputPinCmd)\n self.new.btn_dev_reset_cmd.clicked.connect(self.DevResetCmd)\n self.new.btn_test.clicked.connect(self.TestStart)\n png=QtGui.QPixmap('./Icon/dqy.png')\n self.new.l1.setPixmap(png)\n \n def starttim(self):\n self.rtim.start(10)\n\n def stoptim(self):\n self.rtim.stop()\n\n def callrtim(self):\n sdata=self.com.comreadbytes()\n self.callbacklog(sdata)\n\n #开始串口接收线程\t\n def StartThread(self):\n # 创建线程 \n self.thread = Uthread() \n self.thread.initcom(self.com)\n self.thread.SetAlive(True)\t\t\n self.thread._signal.connect(self.callbacklog)\t\t\n # 开始线程 \n self.thread.start() \n\n #停止串口线程\n def StopThread(self):\n if self.thread is not None:\n self.thread.SetAlive(False)\n self.thread.quit()\n self.thread.wait()\n self.thread.exit()\n self.thread = None\n \n def callbacklog(self,msg):\n if len(msg)>0:\n cbcheck=self.new.cb_receive.checkState()\n hdata=\"\"\n try:\n if cbcheck:\n hdata=self.com.HexToString(msg)\n self.ShowMsg(hdata)\n else:\n hdata=msg.decode('utf-8','replace')\n self.ShowMsg(hdata)\n self.WriteLog(hdata+\"\\r\\n\")\n except Exception as e:\n self.ShowMsg(str(e))\n\n def ShowCw(self):\n self.searchcom()\n self.cw.set_com(self.sl)\n self.cw.show()\n\n def callcw(self,msg,b=0):\n if msg:\n self.ShowLog(str(msg))\n try:\n com=msg['com']\n bsp=msg['bsp']\n d=msg['d']\n s=msg['s']\n p=msg['p']\n rp=self.com.Get_p(p)\n self.com.initcom(com,int(bsp),int(d),int(s),rp)\n if b==0:\n WriteXml(msg)\n self.ShowLog(\"串口设置成功\")\n if self.com.isopen():\n self.OpenCom(\"关闭\")\n self.btn_opencom()\n except Exception as e:\n self.ShowBox(str(e))\n\n def WriteLog(self,sdata,b=0):\n self.svaedata+=sdata\n if len(self.svaedata)>=512 or b==1:\n with open(self.logpath,'a',encoding='utf-8') as f: \n f.write(self.svaedata)\n f.close()\n self.svaedata=\"\"\n \n def btn_opencom(self):\n t=self.new.btn_open.text()\n self.OpenCom(t)\n \n def OpenCom(self,t):\n try:\n if t==\"打开\":\n comname=self.new.cb_comname.currentText()\n self.com.initcom(comname=comname)\n if(self.com.opencom()):\n self.new.btn_open.setText(\"关闭\")\n self.new.btn_open.setStyleSheet(\"background-color:gold\")\n self.ShowLog(\"串口打开\")\n #self.StartThread()\n self.starttim()\n else:\n self.ShowLog(\"打开失败\")\n elif t==\"关闭\":\n self.com.CloseCom()\n if(self.com.isopen()):\n self.ShowLog(\"关闭失败!\")\n else:\n #self.StopThread()\n self.stoptim()\n self.ShowLog(\"串口关闭\")\n self.new.btn_open.setText(\"打开\")\n self.new.btn_open.setStyleSheet(\"\")\n except Exception as e:\n self.ShowBox(str(e)) \n\n def searchcom(self):\n clist=self.com.Get_ports()\n 
self.new.cb_comname.clear()\n self.new.cb_comname.addItems(clist)\n self.cw.initcom(clist)\n\n def ShowMsg(self, msg):\n #self.new.txt_show.append(msg+\"\\r\\n\")\n self.new.txt_show.append(msg)\n self.new.txt_show.moveCursor(QtGui.QTextCursor.End)\n \n def ShowLog(self, msg):\n #self.new.log_show.append(msg+\"\\r\\n\")\n self.new.log_show.append(msg)\n self.new.log_show.moveCursor(QtGui.QTextCursor.End)\n\n def ClearMsg(self):\n self.new.txt_show.clear()\n\n def ShowBox(self,msg,title=\"串口收发数据\"):\n QMessageBox.information(self,title, msg, QMessageBox.Ok)\n\n def closeEvent(self, event):\n try:\n self.cw.close()\n self.StopThread()\n self.stoptim()\n self.com.CloseCom()\n self.WriteLog(\"\",b=1)\n except Exception as e:\n self.ShowLog(str(e))\n\n def HexToBytes(self): #11 22 33 44 55 \n bl=[]\n try:\n text=self.new.txt_send.text()\n slist=text.split(\" \")\n for e in slist:\n b=int(e,16)\n bl.append(b)\n except Exception as e:\n self.ShowBox(str(e))\n return bl\n\n def WriteData(self):\n try:\n slen=0\n msg=self.new.txt_send.text()\n cbcheck=self.new.cb_send.checkState()\n if cbcheck:\n bl=self.HexToBytes()\n slen=self.com.comwritebytes(bl)\n else:\n slen=self.com.comwritestring(msg)\n self.ShowLog(\"发送数据长度\"+str(slen))\n except Exception as e:\n self.ShowBox(str(e))\n\n def ReadData(self):\n sdata=self.com.comreadbytes()\n if len(sdata)>0:\n cbcheck=self.new.cb_receive.checkState()\n try:\n if cbcheck:\n hdata=self.com.HexToString(sdata)\n self.ShowMsg(hdata)\n else:\n hdata=sdata.decode('utf-8','replace')\n self.ShowMsg(hdata)\n except Exception as e:\n self.ShowLog(str(e))\n \n def CmdSend(self, cmd):\n len = self.com.comwritestring(cmd)\n self.ShowLog(\"发送数据长度\"+str(len))\n\n def HelpCmd(self):\n self.CmdSend(\"HELP\")\n\n def RebootCmd(self):\n self.CmdSend(\"REBOOT\")\n\n def DevInfoCmd(self):\n self.CmdSend(\"DEV_INFO\")\n\n def LogOnCmd(self):\n self.CmdSend(\"LOG:ON\")\n\n def LogOffCmd(self):\n self.CmdSend(\"LOG:OFF\")\n\n def InputPinCmd(self):\n self.PinWin.show()\n\n def PinSet(self,msg):\n self.CmdSend(\"PIN:\"+ msg.decode(\"utf-8\"))\n self.ShowLog(msg.decode(\"utf-8\"))\n\n def DevResetCmd(self):\n self.CmdSend(\"DEV_RESET\")\n\n def TestStart(self):\n self.TestWin.show()\n \nif __name__ == '__main__':\n import sys\n from PyQt5 import QtWidgets\n from uartform import Uartwindow\n\n app = QtWidgets.QApplication(sys.argv)\n uf = Uartwindow()\n uf.show()\n sys.exit(app.exec_())" }, { "alpha_fraction": 0.6707289814949036, "alphanum_fraction": 0.6931087970733643, "avg_line_length": 50.86206817626953, "blob_id": "9df288ed25aa0069b77ec58f2211f0ece5df6ec3", "content_id": "85655364c6b45bdcca4f3a599632f6dfd197a766", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4565, "license_type": "no_license", "max_line_length": 72, "num_lines": 87, "path": "/ui/uart.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'uart.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_uartform(object):\n def setupUi(self, uartform):\n uartform.setObjectName(\"uartform\")\n uartform.resize(387, 303)\n self.txt_show = QtWidgets.QTextEdit(uartform)\n self.txt_show.setGeometry(QtCore.QRect(20, 130, 361, 131))\n self.txt_show.setObjectName(\"txt_show\")\n self.layoutWidget = QtWidgets.QWidget(uartform)\n self.layoutWidget.setGeometry(QtCore.QRect(21, 30, 314, 25))\n self.layoutWidget.setObjectName(\"layoutWidget\")\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.cb_comname = QtWidgets.QComboBox(self.layoutWidget)\n self.cb_comname.setObjectName(\"cb_comname\")\n self.horizontalLayout.addWidget(self.cb_comname)\n self.btn_search = QtWidgets.QPushButton(self.layoutWidget)\n self.btn_search.setObjectName(\"btn_search\")\n self.horizontalLayout.addWidget(self.btn_search)\n self.btn_open = QtWidgets.QPushButton(self.layoutWidget)\n self.btn_open.setObjectName(\"btn_open\")\n self.horizontalLayout.addWidget(self.btn_open)\n self.btn_setcom = QtWidgets.QPushButton(self.layoutWidget)\n self.btn_setcom.setObjectName(\"btn_setcom\")\n self.horizontalLayout.addWidget(self.btn_setcom)\n self.txt_send = QtWidgets.QLineEdit(uartform)\n self.txt_send.setGeometry(QtCore.QRect(20, 70, 361, 20))\n self.txt_send.setObjectName(\"txt_send\")\n self.widget = QtWidgets.QWidget(uartform)\n self.widget.setGeometry(QtCore.QRect(20, 100, 207, 27))\n self.widget.setObjectName(\"widget\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget)\n self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.cb_send = QtWidgets.QCheckBox(self.widget)\n self.cb_send.setObjectName(\"cb_send\")\n self.horizontalLayout_2.addWidget(self.cb_send)\n self.btn_send = QtWidgets.QPushButton(self.widget)\n self.btn_send.setObjectName(\"btn_send\")\n self.horizontalLayout_2.addWidget(self.btn_send)\n self.horizontalLayout_3.addLayout(self.horizontalLayout_2)\n self.btn_receive = QtWidgets.QPushButton(self.widget)\n self.btn_receive.setObjectName(\"btn_receive\")\n self.horizontalLayout_3.addWidget(self.btn_receive)\n self.widget1 = QtWidgets.QWidget(uartform)\n self.widget1.setGeometry(QtCore.QRect(21, 271, 229, 25))\n self.widget1.setObjectName(\"widget1\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.widget1)\n self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.cb_receive = QtWidgets.QCheckBox(self.widget1)\n self.cb_receive.setObjectName(\"cb_receive\")\n self.horizontalLayout_4.addWidget(self.cb_receive)\n self.btn_clear = QtWidgets.QPushButton(self.widget1)\n self.btn_clear.setObjectName(\"btn_clear\")\n self.horizontalLayout_4.addWidget(self.btn_clear)\n self.btn_save = QtWidgets.QPushButton(self.widget1)\n self.btn_save.setObjectName(\"btn_save\")\n self.horizontalLayout_4.addWidget(self.btn_save)\n\n self.retranslateUi(uartform)\n QtCore.QMetaObject.connectSlotsByName(uartform)\n\n def retranslateUi(self, uartform):\n _translate = QtCore.QCoreApplication.translate\n uartform.setWindowTitle(_translate(\"uartform\", \"串口接收发送界面\"))\n self.btn_search.setText(_translate(\"uartform\", \"搜索\"))\n 
self.btn_open.setText(_translate(\"uartform\", \"打开\"))\n self.btn_setcom.setText(_translate(\"uartform\", \"设置串口\"))\n self.cb_send.setText(_translate(\"uartform\", \"hex\"))\n self.btn_send.setText(_translate(\"uartform\", \"发送\"))\n self.btn_receive.setText(_translate(\"uartform\", \"接收\"))\n self.cb_receive.setText(_translate(\"uartform\", \"hex显示\"))\n self.btn_clear.setText(_translate(\"uartform\", \"清除\"))\n self.btn_save.setText(_translate(\"uartform\", \"保存\"))\n\n" }, { "alpha_fraction": 0.36867013573646545, "alphanum_fraction": 0.4212020933628082, "avg_line_length": 31.5230770111084, "blob_id": "036c1dd56d4935cb77fbc400b9a8921ed0d91c8a", "content_id": "3a42f571c3508e367dcd2448c5ea3eb4cec04946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2113, "license_type": "no_license", "max_line_length": 170, "num_lines": 65, "path": "/gps.py", "repo_name": "inzaghian/anzhu", "src_encoding": "UTF-8", "text": "#coding:utf-8\nclass gps():\n def __init__(self):\n self.realdata=b''\n self.fb=False\n\n def Get_gps_data(self,sdata):\n sl=[]\n for e in sdata:\n if e==0x24 and self.fb==False:\n self.fb=True\n self.realdata+=bytes([e])\n if self.fb and e==0x0a:\n self.realdata+=bytes([e])\n data=self.realdata.decode('utf-8','replace')\n sl.append(data)\n self.fb=False\n self.realdata=b''\n elif e!=0x24 and self.fb:\n self.realdata+=bytes([e])\n elif e==0x24 and self.fb and len(self.realdata)>5:\n data=self.realdata.decode('utf-8','replace')\n sl.append(data)\n self.realdata=b''\n self.realdata+=bytes([e])\n print(sl)\n return sl\n\n def CheckGpsBuff(self,buff):\n buff=buff.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n isok = False\n if len(buff)==(buff.find('*')+2):\n pass\n else:\n crc=0\n for ch in buff:\n if ch=='$':\n pass\n elif ch=='*':\n break\n else:\n if crc==0:\n crc=ord(ch)\n else:\n crc=crc^ord(ch)\n try:\n if buff.find('*')+3==len(buff):\n length=buff.find('*')\n s=buff[length+1]+buff[length+2]\n scode=(str(hex(crc))[2:]).upper()\n if len(scode)==2:\n pass\n else:\n scode=\"0\"+scode\n if s==scode:\n isok=True\n except Exception as e:\n print(\"gpsErr:\",str(e))\n return isok\n\ng=gps()\nsdata=\"$GPRMC,121252.000,A,3958.3032,N,11629.6046,E,15.15,359.95,070306,,,A*54$GPRMC,121252.000,A,3958.3032,N,11629.6046,E,15.15,359.95,070306,,,A*54\\r\\n\".encode('utf-8')\nsl=g.Get_gps_data(sdata)\nfor e in sl:\n print(g.CheckGpsBuff(e))" } ]
10
jain-nikunj/host-node-polling
https://github.com/jain-nikunj/host-node-polling
10a837530b964746b7e05c490f058dd471f7cee0
ac2a1acd7a0de91c30d5d693eb1fee742b522cbd
f9577ebdf81076deae1e3057addc4d779d35a759
refs/heads/master
2021-08-21T20:31:19.781652
2017-11-29T01:08:45
2017-11-29T01:08:45
111,615,877
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.805084764957428, "alphanum_fraction": 0.805084764957428, "avg_line_length": 58, "blob_id": "b9b2a2f577e4a19c5fe2047aa23322fd9a0d2bdc", "content_id": "28f2eca834d149c2f20834fcf36131cca32151df", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 118, "license_type": "permissive", "max_line_length": 97, "num_lines": 2, "path": "/README.md", "repo_name": "jain-nikunj/host-node-polling", "src_encoding": "UTF-8", "text": "# host-node-polling\nBasic scripts to have a gateway node pull from other nodes in the network using netcat and python\n" }, { "alpha_fraction": 0.6558139324188232, "alphanum_fraction": 0.6627907156944275, "avg_line_length": 26.285715103149414, "blob_id": "fef4031772231073945293e1420048711376d081", "content_id": "ac78be0cbb0e3d1e55802d41eaa90b5e0b5c20de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1720, "license_type": "permissive", "max_line_length": 80, "num_lines": 63, "path": "/scripts/node_poll.py", "repo_name": "jain-nikunj/host-node-polling", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# Created for SC2\n# Add a line in boot.sh saying:\n# nc.traditional -ul -p 441 localhost -e /root/radio-api/node_poll.py &\n# Relies on nc.traditional being installed\n\nimport sys\nimport time\n\ndef get_tap_stats(tap_name):\n '''\n Function which reads /proc/net/dev and expects a line\n with statistics for tap. Returns two numbers: Packets received and transmitted\n from tap by parsing based on a special predefined linux format.\n '''\n with open('/proc/net/dev', 'r') as inFile:\n lines = inFile.readlines()\n\n colLine = lines[1]\n _, receiveCols, transmitCols = colLine.split('|')\n receiveCols = map(lambda a: \"recv_\"+a, receiveCols.split())\n transmitCols = map(lambda a: \"trans_\"+a, transmitCols.split())\n\n cols = receiveCols + transmitCols\n\n faces = {}\n for line in lines[2:]:\n if line.find(':') < 0: continue\n face, data = line.split(':')\n faceData = dict(zip(cols, data.split()))\n faces[face.lstrip().rstrip()] = faceData\n\n tap = faces[tap_name]\n return tap['recv_packets'], tap['trans_packets']\n\ndef check_and_respond(count, tap_name):\n '''\n Checks if the system has received the query character. 
If so, reads tap\n statistics and writes those to stdout in the format of:\n\n received_packets transmitted_packets\n '''\n line = sys.stdin.readline()\n if line and line.rstrip() == \"?\":\n count += 1\n received, transmitted = get_tap_stats(tap_name)\n sys.stdout.write(received + ' ' + transmitted + '\\n')\n sys.stdout.flush()\n\n return count\n\ndef main():\n '''\n Periodically checks, and responds as necessary.\n '''\n count = 0\n tap_name = 'tr0'\n while True:\n count = check_and_respond(count, tap_name)\n time.sleep(0.5)\n\nif __name__ == '__main__':\n main()\n\n" }, { "alpha_fraction": 0.5795601606369019, "alphanum_fraction": 0.5873221158981323, "avg_line_length": 26.60714340209961, "blob_id": "9272a3c38491e83c1efada44160bf5fff65dc6ee", "content_id": "70a05e27fb77b4cd328beb4be97f1383536d5ff8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 773, "license_type": "permissive", "max_line_length": 84, "num_lines": 28, "path": "/scripts/driver_node.py", "repo_name": "jain-nikunj/host-node-polling", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport sys, os\nimport time\nimport subprocess\nfrom subprocess import check_output\n\ndef main():\n    last_update_time = 0\n    update_time_period = 2\n\n    port_num = '441'\n\n    cmd = 'nc.traditional -ul -p {} -e /root/radio_api/node_poll.py'.format(\n        port_num)\n    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n\n    while True:\n        if time.time() - last_update_time > update_time_period:\n            p.kill()\n            cmd = 'nc.traditional -ul -p {} -e /root/radio_api/node_poll.py'.format(\n                port_num)\n            p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n            last_update_time = time.time()\n\n        time.sleep(update_time_period / 2)\n\nif __name__ == '__main__':\n    main()\n" } ]
3
aroberge/qt_py
https://github.com/aroberge/qt_py
2d99d37303cc5acd81967fd0b25c9ba061e10dfc
67e937b4b89b98f314bd2515cb86ae910ad1bf1e
b99e9714377772dd5959517d636412b4293286a7
refs/heads/master
2016-09-08T02:35:53.720140
2015-02-11T00:28:17
2015-02-11T00:28:17
27,925,831
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6118598580360413, "alphanum_fraction": 0.6145552396774292, "avg_line_length": 20.882352828979492, "blob_id": "dfec81664bb6d9705ebf5266404044f890f0b2ae", "content_id": "0c3d5c17b1c587681b0a9adfe07a0e8b566b3f1a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 371, "license_type": "permissive", "max_line_length": 50, "num_lines": 17, "path": "/pyqt_examples/message_box.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple message box'''\nfrom PyQt4 import QtGui, QtCore\n\ndef message_box(message=\"Message\", title=\"Title\"):\n \"\"\"Simple message box.\n \"\"\"\n app = QtGui.QApplication([])\n box = QtGui.QMessageBox(None)\n box.setWindowTitle(title)\n box.setText(message)\n box.show()\n box.exec_()\n app.quit()\n\n\nif __name__ == '__main__':\n message_box(\"Simple test\")" }, { "alpha_fraction": 0.64682537317276, "alphanum_fraction": 0.648809552192688, "avg_line_length": 27, "blob_id": "dcfd123964738ada5854204829ae74f1dd1b90a0", "content_id": "a7d6efcaf152b0a784e42d1ca2e203e6fd772f87", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "permissive", "max_line_length": 59, "num_lines": 18, "path": "/pyqt_examples/text_input_dialog2.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple text input dialog'''\nfrom PyQt4 import QtGui, QtCore\n\ndef text_input(question=\"Enter your response\", default=\"\"):\n app = QtGui.QApplication([])\n\n flags = QtCore.Qt.WindowFlags()\n flags |= QtCore.Qt.FramelessWindowHint\n\n text, ok = QtGui.QInputDialog.getText(None, '',\n question, QtGui.QLineEdit.Normal, default, flags)\n app.quit()\n if ok and text:\n return text\n\nif __name__ == '__main__':\n answer = text_input(question=\"What is your name?\")\n print(answer)\n" }, { "alpha_fraction": 0.6279176473617554, "alphanum_fraction": 0.678260862827301, "avg_line_length": 41.019229888916016, "blob_id": "ea34ef76fee036a9a6ec2f145fe73af733ca0140", "content_id": "6431b11bfffbb6bd4b9e9c14391855075831626f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2185, "license_type": "permissive", "max_line_length": 74, "num_lines": 52, "path": "/pyqt_examples/message_box3.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple message box with iconl default image embedded in source code'''\nimport base64\n\nfrom PyQt4 import QtGui, QtCore\nfrom tempfile import TemporaryFile\n\nencoded_icon = (b'iVBORw0KGgoAAAANSUhEUgAAAB4AAAAeCAYAAAA7MK6i'+\n b'AAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAB3RJTUUH3gwOEys1/2xprg'+\n b'AAAAd0RVh0QXV0aG9yAKmuzEgAAAAMdEVYdERlc2NyaXB0aW9uABMJ'+\n b'ISMAAAAKdEVYdENvcHlyaWdodACsD8w6AAAADnRFWHRDcmVhdGlvbi'+\n b'B0aW1lADX3DwkAAAAJdEVYdFNvZnR3YXJlAF1w/zoAAAALdEVYdERp'+\n b'c2NsYWltZXIAt8C0jwAAAAh0RVh0V2FybmluZwDAG+aHAAAAB3RFWH'+\n b'RTb3VyY2UA9f+D6wAAAAh0RVh0Q29tbWVudAD2zJa/AAAABnRFWHRU'+\n b'aXRsZQCo7tInAAABQUlEQVRIid2VQW6FIBBAn80/hktM3HsB7yGH4R'+\n b'5u8TjuTWTpPejm+6sUvow2pu1L2ME8GWcGvPfEFuCvrK7rfCq2957i'+\n b'KdlRFIUHaJqGuq4BqKrq274QpRRlWQLQ9z0A1toitjcplkpj8rZteU'+\n b'X3fvcBH0eBJFIA5xzLshzuOxSHzPOMMQallPTojof0wDAMWGtxzuUd'+\n b'CFK8Ir7xT3FKrLW+P9XGGID8VCf4W6kW8RxG94sTHIrneRYF3E4uQN'+\n b'5O4zgyTZNIHpvVKaKzGr4eiiv4xG3fircfsD4YVVVhjNkt4NXTWuu3'+\n b'si1ZxbVKU5zp6UtVvb31beIrUjgxMlfpVbLEa1utxRVy5sHIqmpJwN'+\n b'yqPhSHHxG2Vq4o5PfO6n8nPtVOW2LFl/PfL4nD1lJKobXOOns61dJ3'+\n 
b'OkTcTjn7clL9CaQWvgP3zR49AAAAAElFTkSuQmCC')\n\n\ndef message_box(message=\"Message\", title=\"Title\", icon=None):\n \"\"\"Simple message box.\n \"\"\"\n app = QtGui.QApplication([])\n box = QtGui.QMessageBox(None)\n if icon is not None:\n box.setWindowIcon(QtGui.QIcon(icon))\n else:\n # could not get the temp file to live long enough to be usable.\n # start by creating one to get a valid path/filename\n temp_file = TemporaryFile(suffix=\".png\")\n fname = temp_file.name\n temp_file.close()\n # use that path/filename to write a real file.\n ff = open(fname, \"wb\")\n ff.write(base64.b64decode(encoded_icon))\n ff.close()\n box.setWindowIcon(QtGui.QIcon(fname))\n box.setWindowTitle(title)\n box.setText(message)\n box.show()\n box.exec_()\n app.quit()\n\n\nif __name__ == '__main__':\n message_box(\"Simple test\")\n message_box(icon=\"images/python.jpg\")\n" }, { "alpha_fraction": 0.7756264209747314, "alphanum_fraction": 0.7949886322021484, "avg_line_length": 40.80952453613281, "blob_id": "17b8a6c54cd483d2de5f068356cc30d302a5b3bd", "content_id": "12131dd37514aaef28d10ab36558339406183a9d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 878, "license_type": "permissive", "max_line_length": 97, "num_lines": 21, "path": "/pyqt_examples/readme.md", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "This directory contains some very simple examples using PyQt.\nThey are a simple progression so that detailed explanations should\nnot be needed to understand what changes and its effects.\n\nI use Python 3.4 and PyQt 4.10.4 for testing\n\n* window1: simplest possible window, with default values\n* window2: simple window with custom values\n* window3: simple window using setGeometry instead of two separate methods\n* window4: simple window - class based\n* window5: adding icon\n\n* text_input_dialog1: simple text dialog\n* text_input_dialog2: removing frame\n* text_input_dialog3: no frame, customizable font\n\n* integer_input_dialog1: simple dialog that ask a user to input a number within a specified range\n\n* message_box: simple message box\n* message_box2: message box with configurable icon\n* message_box3: message box with configurable icon and default icon embedded in source code\n" }, { "alpha_fraction": 0.5556961894035339, "alphanum_fraction": 0.5708860754966736, "avg_line_length": 26.241378784179688, "blob_id": "24898b2a30cbd2ea46b212c468e3d4fd2945ac68", "content_id": "85c9ebe9dbb0e5d1c53c93b957fc455b41bcbe19", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 790, "license_type": "permissive", "max_line_length": 72, "num_lines": 29, "path": "/pyqt_examples/window5.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple window - with icon.'''\nimport sys\nfrom PyQt4 import QtGui\n\nclass SimpleWindow(QtGui.QWidget):\n def __init__(self, title=None, position=None, size=None, icon=None):\n super().__init__()\n\n if title is None:\n title = \"Simple Window\"\n self.setWindowTitle(title)\n\n if position is not None:\n self.move(*position)\n\n if size is not None:\n self.resize(*size)\n\n if icon is not None:\n self.setWindowIcon(QtGui.QIcon(icon))\n self.show()\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n window = SimpleWindow()\n window2 = SimpleWindow(title=\"Other window\", size=(400, 200),\n position=(10, 10), icon=\"images/python.jpg\")\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 
0.7048457860946655, "alphanum_fraction": 0.7048457860946655, "avg_line_length": 21.700000762939453, "blob_id": "8cb8d34b90f27d81c6d0558c6cb1ee3a544aa868", "content_id": "36d9bf3c6fe0ab21cabb932fe47adf46da1d38e2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 454, "license_type": "permissive", "max_line_length": 79, "num_lines": 20, "path": "/README.md", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "qt_py\n=====\n\nShort term goal\n---------------\n\nBecome familiar with using PyQt. As such, this repository should not be\nof much use for anyone else.\n\nLonger term goal\n---------------\n\nInvestigate the possibility of creating a simple set of self-contained widgets,\ninspired by easygui, so that they could be used by beginners.\n\nDocumentation\n-------------\n\nNot planned for the near future; however, a readme file should be\nincluded in most subdirectories.\n" }, { "alpha_fraction": 0.5746268630027771, "alphanum_fraction": 0.5746268630027771, "avg_line_length": 21.5, "blob_id": "167d04e99cf4afa708d0579394110c78a53c35c9", "content_id": "3ecd58741c16f1fefffd6fd3d5866b0d1f87f88a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 134, "license_type": "permissive", "max_line_length": 53, "num_lines": 6, "path": "/experiment/test_text_input.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "import bguic\n\nname = bguic.text_input(message=\"What is your name?\",\n                        title=\"Mine is Reeborg.\")\n\nprint(name)" }, { "alpha_fraction": 0.5452352166175842, "alphanum_fraction": 0.5615199208259583, "avg_line_length": 29.971961975097656, "blob_id": "b7af6ad5b53a53eb30d5d693...
"b7af6ad5b53a53eb30d5d693"?
"b7af6ad5b53a53eb44c35b6..."
"blob_id": "b7af6ad5b53a53eb", "content_id": "351b1c856abf39b01d45ec0626a4f55b16b3fed7"
self.flood_fill(x + 1, y, old_color, new_color)\n self.flood_fill(x - 1, y, old_color, new_color)\n self.flood_fill(x, y + 1, old_color, new_color)\n self.flood_fill(x, y - 1, old_color, new_color)\n\n\nclass FloodTest(QtGui.QMainWindow):\n '''Non real game set up to try various functions/methods\n that can be used in games'''\n\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n\n self.setWindowTitle(\"Test Game\")\n self.statusbar = self.statusBar()\n self.board = ExperimentalBoard(self, 10, 10)\n self.setCentralWidget(self.board)\n self.resize(self.board.width, self.board.height)\n self.setFixedSize(self.board.width,\n self.board.height+self.statusbar.height())\n self.show()\n\n def receive_message(self, message):\n self.statusbar.showMessage(message)\n\n def keyPressEvent(self, event): # noqa\n if event.key() == QtCore.Qt.Key_B:\n self.board.color = QtGui.QColor(0, 0, 200)\n elif event.key() == QtCore.Qt.Key_R:\n self.board.color = QtGui.QColor(200, 0, 0)\n elif event.key() == QtCore.Qt.Key_G:\n self.board.color = QtGui.QColor(0, 200, 0)\n elif event.key() == QtCore.Qt.Key_F:\n self.showFullScreen()\n print(self.width(), self.height())\n elif event.key() == QtCore.Qt.Key_Escape:\n self.resize(self.board.width, self.board.height)\n self.show()\n\n\ndef main():\n\n app = QtGui.QApplication([])\n FloodTest()\n app.exec_()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.563973069190979, "alphanum_fraction": 0.5732323527336121, "avg_line_length": 32, "blob_id": "81ba23b2d7dc7eced869624fe1859951743af348", "content_id": "42130875883a1ae6b78aeca4f798a7298b6717aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "permissive", "max_line_length": 76, "num_lines": 36, "path": "/pyqt_examples/integer_input_dialog1.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple integer input dialog'''\nfrom PyQt4 import QtGui, QtCore\n\ndef integer_input(message=\"Choose a number\", title=\"Title\",\n default_value=1, min_=0, max_=100, step=1):\n \"\"\"Simple dialog to ask a user to select a number within a certain range\n \"\"\"\n app = QtGui.QApplication([])\n\n flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint\n\n number, ok = QtGui.QInputDialog.getInteger(None,\n title, message, default_value, min_, max_, step, flags)\n app.quit()\n if ok:\n return number\n\nif __name__ == '__main__':\n from random import randint\n min_ = 1\n max_ = 50\n answer = randint(min_, max_)\n print(answer)\n guess = 0\n title = \"Guessing game\"\n while guess != answer:\n message = \"Guess a number between {} and {}\".format(min_, max_)\n guess = integer_input(message=message, title=title,\n default_value=guess, min_=min_ ,max_=max_)\n if guess < answer:\n title = \"Too low\"\n min_ = guess\n elif guess > answer:\n title = \"Too high\"\n max_ = guess\n print(\"You got it! 
{} was the answer\".format(guess))\n" }, { "alpha_fraction": 0.627609133720398, "alphanum_fraction": 0.6328273415565491, "avg_line_length": 30.954545974731445, "blob_id": "c9f135324e43f38fcadb81fd91a5041b94b75241", "content_id": "613aa53da2955af3fb52e05c0f1833a1c1c87f21", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2108, "license_type": "permissive", "max_line_length": 78, "num_lines": 66, "path": "/pyqt_examples/choices.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt4 import QtGui\n\n\nclass MultipleChoicesDialog(QtGui.QMainWindow):\n \"\"\"Dialog with the possibility of selecting one or more\n items from a list\"\"\"\n def __init__(self, choices=None, title=\"Title\"):\n super().__init__()\n if choices is None:\n choices = [\"Item %d\"%i for i in range(10)]\n self.setWindowTitle(title)\n\n main_widget = QtGui.QWidget()\n main_layout = QtGui.QVBoxLayout()\n main_widget.setLayout(main_layout)\n self.setCentralWidget(main_widget)\n\n self.choices_widget = QtGui.QListWidget()\n self.choices_widget.setSelectionMode(\n QtGui.QAbstractItemView.ExtendedSelection)\n\n for choice in choices:\n item = QtGui.QListWidgetItem()\n item.setText(choice)\n self.choices_widget.addItem(item)\n\n main_layout.addWidget(self.choices_widget)\n\n\n# button_box = QtGui.QGroupBox(name)\n button_box_layout = QtGui.QGridLayout()\n\n return_choices_btn = QtGui.QPushButton(\"Ok\")\n return_choices_btn.clicked.connect(self.return_choices)\n select_all_btn = QtGui.QPushButton(\"Select all\")\n select_all_btn.clicked.connect(self.select_all)\n clear_all_btn = QtGui.QPushButton(\"Clear all\")\n clear_all_btn.clicked.connect(self.clear_all)\n\n\n button_box = QtGui.QWidget()\n button_box_layout.addWidget(select_all_btn, 0, 0)\n button_box_layout.addWidget(clear_all_btn, 1, 0)\n button_box_layout.addWidget(return_choices_btn, 1, 1)\n button_box.setLayout(button_box_layout)\n\n main_layout.addWidget(button_box)\n\n\n # todo: add buttons Ok, Cancel, Select all, Clear\n\n def return_choices(self):\n print([item.text() for item in self.choices_widget.selectedItems()])\n\n def select_all(self):\n self.choices_widget.selectAll()\n\n def clear_all(self):\n self.choices_widget.clearSelection()\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n dialog_1 = MultipleChoicesDialog()\n dialog_1.show()\n app.exec_()" }, { "alpha_fraction": 0.5321933031082153, "alphanum_fraction": 0.5378438830375671, "avg_line_length": 31.326923370361328, "blob_id": "11dd6a3cfada8b518ed6ca4dcf15079a57adfe91", "content_id": "47481d409e2e080b91782c77919873ff449f4c4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6725, "license_type": "permissive", "max_line_length": 80, "num_lines": 208, "path": "/mini_games/minesweeper.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "\nimport random\n\nfrom PyQt4 import QtGui\n\nimport board\n\nclass Tile:\n def __init__(self, image=None, value=None):\n self.image = image\n self.value = value\n\n\nimages = {}\n\nfor nb_mines in range(1, 9):\n images[nb_mines] = QtGui.QImage(\"images/number_{}.png\".format(nb_mines))\n\nfor name in [\"covered\", \"empty\", \"flag_mine\", \"flag_mine_wrong\", \"flag_suspect\",\n \"mine\", \"mine_wrong\"]:\n images[name] = QtGui.QImage(\"images/{}.png\".format(name))\n\n\nclass MyBoard(board.Board):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n 
self.game_started = False\n self.game_init((None, None))\n self.nb_mines = 10\n self.marked_mines = 0\n self.game_over = False\n\n def reset(self):\n #self.create_empty_grid()\n self.game_init((None, None))\n self.nb_mines = 10\n self.marked_mines = 0\n self.game_over = False\n self.game_started = False\n self.repaint()\n\n def game_init(self, tile):\n if tile == (None, None):\n for tile_ in self.grid:\n self.grid[tile_] = Tile(images[\"covered\"], None)\n return\n mines = 0\n while mines < self.nb_mines:\n x = random.randint(0, self.nb_cols-1)\n y = random.randint(0, self.nb_rows-1)\n if (x, y) == tile: # do not put a bomb at location of first click\n continue\n if self.grid[(x, y)].value is None:\n self.grid[(x, y)].value = \"mine\"\n mines += 1\n for tile_ in self.grid:\n if self.grid[tile_].value != \"mine\":\n self.count_mine_neighbours(tile_)\n self.game_started = True\n\n def count_mine_neighbours(self, tile):\n mines = 0\n for i in [-1, 0, 1]:\n for j in [-1, 0, 1]:\n if i==j and i== 0:\n continue\n neighbour = (tile[0]+i, tile[1]+j)\n if neighbour in self.grid:\n if self.grid[neighbour].value == \"mine\":\n mines += 1\n if mines != 0:\n self.grid[tile].value = mines\n\n def draw(self, painter):\n '''Basic drawing method; usually overriden'''\n\n for tile in self.grid:\n col, row = tile\n painter.drawImage(col*self.tile_size, row*self.tile_size,\n self.grid[tile].image)\n\n def mousePressEvent(self, event):\n if self.game_over:\n return\n super().mousePressEvent(event)\n\n def handle_left_click(self, tile):\n '''meant to be overriden'''\n message = \"{} mines left\".format(self.nb_mines - self.marked_mines)\n if not self.game_started:\n self.game_init(tile)\n if self.grid[tile].value is None:\n self.open_empty_region(tile)\n else:\n if self.grid[tile].image == images[\"flag_mine\"]:\n self.marked_mines -= 1\n self.grid[tile].image = images[self.grid[tile].value]\n if self.grid[tile].value == \"mine\":\n message = \"You lose!\"\n self.game_over = True\n self.repaint()\n self.send_message(message)\n\n\n def handle_right_click(self, tile):\n if not self.game_started:\n return\n if self.grid[tile].image == images[\"covered\"]:\n self.grid[tile].image = images[\"flag_mine\"]\n self.marked_mines += 1\n message = \"{} mines left\".format(self.nb_mines - self.marked_mines)\n self.send_message(message)\n if self.marked_mines == self.nb_mines:\n self.game_over = True\n self.evaluate_position()\n elif self.grid[tile].image == images[\"flag_mine\"]:\n self.grid[tile].image = images[\"flag_suspect\"]\n self.marked_mines -= 1\n message = \"{} mines left\".format(self.nb_mines - self.marked_mines)\n self.send_message(message)\n elif self.grid[tile].image == images[\"flag_suspect\"]:\n self.grid[tile].image = images[\"covered\"]\n else:\n return\n self.repaint()\n\n def evaluate_position(self):\n for tile in self.grid:\n if self.grid[tile].value == \"mine\":\n if self.grid[tile].image != images[\"flag_mine\"]:\n self.send_message(\"You lose\")\n self.show_losing_board()\n return\n\n self.show_winning_board()\n self.send_message(\"You win!\")\n return\n\n def show_winning_board(self):\n '''after all mines have been guessed correctly, uncover remaining\n tiles'''\n for tile in self.grid:\n if self.grid[tile].image == images[\"covered\"]:\n self.grid[tile].image = images[self.grid[tile].value]\n\n\n def show_losing_board(self):\n '''Show incorrect flags'''\n for tile in self.grid:\n cell = self.grid[tile]\n if (cell.image == images[\"flag_mine\"] and cell.value != \"mine\"):\n cell.image = 
images[\"flag_mine_wrong\"]\n elif cell.value == \"mine\" and cell.image != images[\"flag_mine\"]:\n cell.image = images[\"mine\"]\n\n\n def open_empty_region(self, tile):\n if tile not in self.grid:\n return\n if self.grid[tile].value is not None:\n self.grid[tile].image = images[self.grid[tile].value]\n return\n\n if self.grid[tile].image == images[\"empty\"]:\n return\n self.grid[tile].image = images[\"empty\"]\n x, y = tile\n for i in [-1, 0, 1]:\n for j in [-1, 0, 1]:\n self.open_empty_region((x+i, y+j))\n\n\nclass TestGame(QtGui.QMainWindow):\n '''Non real game set up to try various functions/methods\n that can be used in games'''\n\n def __init__(self):\n super().__init__()\n self.board = MyBoard(self, tile_size=24)\n self.init_ui()\n\n def init_ui(self):\n\n self.setWindowTitle(\"Test Game\")\n self.statusbar = self.statusBar()\n menu = self.menuBar()\n new_game_menu = menu.addMenu(\"New Game\")\n new_game_action = QtGui.QAction(\"Easy\", self)\n new_game_action.triggered.connect(self.board.reset)\n new_game_menu.addAction(new_game_action)\n self.setCentralWidget(self.board)\n self.resize(self.board.width, self.board.height)\n self.setFixedSize(self.board.width,\n self.board.height+self.statusbar.height()+menu.height())\n self.show()\n\n def receive_message(self, message):\n self.statusbar.showMessage(message)\n\n\ndef main():\n\n app = QtGui.QApplication([])\n game = TestGame()\n app.exec_()\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6471773982048035, "alphanum_fraction": 0.6673387289047241, "avg_line_length": 25.105262756347656, "blob_id": "9e43b13ae0e3b73e6fdda3aa1223ca4f2741c65c", "content_id": "e29871918a90c2d06a1ff456d0575ea7a314fd74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 496, "license_type": "permissive", "max_line_length": 55, "num_lines": 19, "path": "/varia/splash.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "''' Small app: just a text splash screen '''\n\nimport sys\n\nimport PyQt4.QtCore as Core\nimport PyQt4.QtGui as Gui\n\ndef show_message(message):\n '''shows a message as a splash screen'''\n app = Gui.QApplication(sys.argv)\n label = Gui.QLabel( message)\n label.setStyleSheet(\"QWidget { font-size:100em }\" )\n label.setWindowFlags(Core.Qt.SplashScreen)\n label.show()\n Core.QTimer.singleShot(10000, app.quit)\n app.exec_()\n\nif __name__ == '__main__':\n show_message(\"Hello world!\")\n" }, { "alpha_fraction": 0.6875, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 13.399999618530273, "blob_id": "e967dc0ab0ab2ad04ad9a59f2034110db38d0819", "content_id": "3a8ea0ec454eae44973acf3750a0a05b49c0d63b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "permissive", "max_line_length": 34, "num_lines": 10, "path": "/pyqt_examples/window1.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple window'''\nimport sys\nfrom PyQt4 import QtGui\n\napp = QtGui.QApplication(sys.argv)\n\nw = QtGui.QWidget()\nw.show()\n\nsys.exit(app.exec_())\n" }, { "alpha_fraction": 0.6401673555374146, "alphanum_fraction": 0.642259418964386, "avg_line_length": 27.176469802856445, "blob_id": "77a1775f7114dcc7c22423ecb2253ccbb7cdd497", "content_id": "68c8d3bffefd671ef3b53f653899d3d62f78ed8f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 478, "license_type": "permissive", "max_line_length": 59, 
"num_lines": 17, "path": "/pyqt_examples/text_input_dialog1.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple text input dialog'''\nfrom PyQt4 import QtGui, QtCore\n\ndef text_input(question=\"Enter your response\", default=\"\"):\n app = QtGui.QApplication([])\n\n text, ok = QtGui.QInputDialog.getText(None, '',\n question, QtGui.QLineEdit.Normal, default)\n app.quit()\n if ok and text:\n return text\n\nif __name__ == '__main__':\n answer = text_input(question=\"What is your name?\")\n print(answer)\n answer = text_input(default=\"response\")\n print(answer)" }, { "alpha_fraction": 0.5961377024650574, "alphanum_fraction": 0.6087321639060974, "avg_line_length": 24.89130401611328, "blob_id": "aac8c6070a19fa5d6b2aa3d670243856812965fc", "content_id": "5ec851f979aa482029f5dc8dab2eca033435e1e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1191, "license_type": "permissive", "max_line_length": 80, "num_lines": 46, "path": "/varia/reminder.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "''' Small reminder app '''\n\nimport sys\nimport time\n\nimport PyQt4.QtCore as Core\nimport PyQt4.QtGui as Gui\n\ndef get_info():\n '''gets info from command line'''\n alert_time = Core.QTime.currentTime()\n message = \"Ready!\"\n\n if len(sys.argv) < 2:\n raise ValueError\n\n hour, minutes = sys.argv[1].split(\":\")\n alert_time = Core.QTime(int(hour), int(minutes))\n if not alert_time.isValid():\n print(\"invalid time\")\n raise ValueError\n\n if len(sys.argv) == 2:\n message = sys.argv[2]\n else:\n message = ' '.join(sys.argv[2:])\n return message, alert_time\n\ndef show_alert(message, alert_time):\n app = Gui.QApplication(sys.argv)\n while Core.QTime.currentTime() < alert_time:\n time.sleep(2)\n label = Gui.QLabel(\"<font color=blue size=20><b>\" + message + \"</b></font>\")\n label.setWindowFlags(Core.Qt.SplashScreen)\n label.show()\n Core.QTimer.singleShot(10000, app.quit)\n app.exec_()\n\nif __name__ == '__main__':\n try:\n message, alert_time = get_info()\n except Exception as e:\n print(e)\n print(\"usage: python reminder.py hour:min message\")\n sys.exit()\n show_alert(message, alert_time)\n" }, { "alpha_fraction": 0.5636754631996155, "alphanum_fraction": 0.5652874708175659, "avg_line_length": 33.44444274902344, "blob_id": "25617a031fba42b977d297af58d635c6f599f09f", "content_id": "ca9af644224a0226e134e1ac63fc609b8681cb8e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1861, "license_type": "permissive", "max_line_length": 76, "num_lines": 54, "path": "/experiment/bguic_demo.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "import bguic\nfrom random import randint\n\n\ndef guessing_game():\n name = bguic.text_input(message=\"What is your name?\",\n title=\"Mine is Reeborg.\")\n if not name:\n name = \"Unknown person\"\n\n message = \"\"\"<p>The following language selection will only affect the\n default GUI elements like the text on the buttons.\n Note that <b>Default</b> is reverting back to the\n local PyQt default (likely English).</p>\"\"\"\n\n bguic.message_box(message=message, title=\"For information\")\n bguic.select_language()\n\n bguic.message_box(message=\"If the text is too small or too large,\" +\n \" you can fix that\",\n title=\"For information\")\n bguic.set_global_font()\n bguic.message_box(message=\"Hello {}. 
Let's play a game\".format(name),\n title=\"Guessing game!\")\n\n guess = min_ = 1\n max_ = 50\n answer = randint(min_, max_)\n title = \"Guessing game\"\n while guess != answer:\n message = \"Guess a number between {} and {}\".format(min_, max_)\n prev_guess = guess\n guess = bguic.integer_input(message=message, title=title,\n default_value=guess, min_=min_ ,max_=max_)\n if guess is None:\n quitting = bguic.yes_no_question(\"Do you want to quit?\")\n guess = prev_guess\n if quitting:\n break\n elif guess < answer:\n title = \"Too low\"\n min_ = guess\n elif guess > answer:\n title = \"Too high\"\n max_ = guess\n else:\n message=\"Congratulations {}! {} was the answer.\".format(name, guess)\n bguic.message_box(message, title=\"You win!\")\n\n\nif __name__ == '__main__':\n bguic.message_box(\"Temporarily setting the locale to Spanish\")\n bguic.set_locale('es')\n guessing_game()\n\n" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 24, "blob_id": "a0f9f4b86e34cc9b4e26fcba1c9e69f1614dbd57", "content_id": "452b08eb20e4b9f5dd36005de008036258d8272b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "permissive", "max_line_length": 24, "num_lines": 1, "path": "/varia/lib/version.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "pyqtris_version = \"1.01\"\n" }, { "alpha_fraction": 0.7836257219314575, "alphanum_fraction": 0.8187134265899658, "avg_line_length": 27.5, "blob_id": "de186784cceed82e2e431ebcab9bca59c1a782e0", "content_id": "19e3d1a4e7b10032e498dfb60b8f12112988d365", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 171, "license_type": "permissive", "max_line_length": 71, "num_lines": 6, "path": "/images/planet_cute/readme.txt", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "Graphics originally from\n\nhttp://www.lostgarden.com/2007/05/dancs-miraculously-flexible-game.html\n\nThe names have been changed to elimate spaces and convert to\nlowercase.\n" }, { "alpha_fraction": 0.6931034326553345, "alphanum_fraction": 0.7379310131072998, "avg_line_length": 21.30769157409668, "blob_id": "57097463429f77dda8bff065791dd7e6097ea147", "content_id": "61fb7d6745228ae1133f69a09b96993e9f7ad31f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 290, "license_type": "permissive", "max_line_length": 67, "num_lines": 13, "path": "/pyqt_examples/window3.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple window with some custom values, using setGeometry instead\n of two separate methods'''\nimport sys\nfrom PyQt4 import QtGui\n\napp = QtGui.QApplication(sys.argv)\n\nw = QtGui.QWidget()\nw.setGeometry(100, 100, 300, 200)\nw.setWindowTitle('Simple window')\nw.show()\n\nsys.exit(app.exec_())\n" }, { "alpha_fraction": 0.7697368264198303, "alphanum_fraction": 0.8092105388641357, "avg_line_length": 37, "blob_id": "ba2337eb05b91ee5863c7a2115045a2d44a03d5e", "content_id": "06a347ab5e7e170cf5b13140cdaf2186fd0cfba6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 152, "license_type": "permissive", "max_line_length": 58, "num_lines": 4, "path": "/experiment/readme.md", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "This directory contains an experimental version of a\nBasic Graphical User Interface Components 
(bguic) library.\n\nTested with Python 3.4 and PyQt 4.10.4\n" }, { "alpha_fraction": 0.5666146874427795, "alphanum_fraction": 0.5765990614891052, "avg_line_length": 28.403669357299805, "blob_id": "94580c8cb467110ea7dd35413a5d5bdae59f6896", "content_id": "693e1d73890a0ad29e0c2fdce8343c76d50955a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3205, "license_type": "permissive", "max_line_length": 69, "num_lines": 109, "path": "/mini_games/board.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple board'''\n\nfrom PyQt4 import QtCore\nfrom PyQt4 import QtGui\n\n\nclass Board(QtGui.QWidget):\n\n def __init__(self, parent, nb_cols=10, nb_rows=10, tile_size=32):\n super().__init__()\n\n self.parent = parent\n self.nb_cols = nb_cols\n self.nb_rows = nb_rows\n self.create_empty_grid()\n self.tile_size = tile_size\n self.width = self.tile_size * self.nb_cols\n self.height = self.tile_size * self.nb_rows\n\n def create_empty_grid(self):\n '''creates a grid as a dict with (row, col) as keys\n and None as values'''\n self.grid = {}\n for row in range(self.nb_rows):\n for col in range(self.nb_cols):\n self.grid[(col, row)] = None\n\n def paintEvent(self, event): # noqa\n '''Overriden QWidget method'''\n painter = QtGui.QPainter()\n painter.begin(self)\n self.draw(painter)\n painter.end()\n\n def mousePressEvent(self, event): # noqa\n '''Overriden QWidget method'''\n tile = self.which_tile_clicked(event)\n if event.button() == QtCore.Qt.RightButton:\n self.handle_right_click(tile)\n elif event.button() == QtCore.Qt.LeftButton:\n self.handle_left_click(tile)\n\n def handle_right_click(self, tile):\n '''meant to be overriden'''\n self.send_message(\"{} clicked at {}\".format(\"right\", tile))\n\n def handle_left_click(self, tile):\n '''meant to be overriden'''\n self.send_message(\"{} clicked at {}\".format(\"left\", tile))\n\n def which_tile_clicked(self, event):\n '''Determine which row and col mouse click occurred'''\n x = event.x()\n y = event.y()\n col = x // self.tile_size\n row = y // self.tile_size\n return col, row\n\n def draw(self, painter):\n '''Basic drawing method; usually overriden'''\n painter.setBrush(QtGui.QColor(200, 200, 200))\n painter.drawRect(0, 0, self.width, self.height)\n\n painter.setPen(QtGui.QColor(155, 155, 155))\n for row in range(self.nb_rows + 1):\n y = row * self.tile_size\n painter.drawLine(0, y, self.width, y)\n\n for col in range(self.nb_cols + 1):\n x = col * self.tile_size\n painter.drawLine(x, 0, x, self.height)\n\n def send_message(self, message):\n '''sends a message to the parent'''\n self.parent.receive_message(message)\n\n\nclass TestGame(QtGui.QMainWindow):\n '''Non real game set up to try various functions/methods\n that can be used in games'''\n\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n def init_ui(self):\n\n self.setWindowTitle(\"Test Game\")\n self.statusbar = self.statusBar()\n self.board = Board(self)\n self.setCentralWidget(self.board)\n self.resize(self.board.width, self.board.height)\n self.setFixedSize(self.board.width,\n self.board.height+self.statusbar.height())\n self.show()\n\n def receive_message(self, message):\n self.statusbar.showMessage(message)\n\n\ndef main():\n\n app = QtGui.QApplication([])\n TestGame()\n app.exec_()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.5101901888847351, "alphanum_fraction": 0.5149456262588501, "avg_line_length": 26.27777862548828, "blob_id": 
"3edaf6de3ed29fa6dee6be559af11c363241ed62", "content_id": "d04d2fcfece84d8ac0b7ea1014470b032a4da1ee", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1472, "license_type": "permissive", "max_line_length": 79, "num_lines": 54, "path": "/varia/calc.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "from math import *\nimport math\n\nimport PyQt4.QtCore as Core\nimport PyQt4.QtGui as Gui\n\nclass Calculatrice(Gui.QDialog):\n def __init__(self):\n super().__init__()\n\n self.entrees = Gui.QLineEdit()\n self.resultats = Gui.QTextBrowser()\n help = Gui.QTextBrowser()\n self.setWindowTitle(\"Calculatrice\")\n\n layout = Gui.QVBoxLayout()\n layout.addWidget(self.entrees)\n layout.addWidget(self.resultats)\n layout.addWidget(help)\n display = ''\n for index, expr in enumerate(dir(math)):\n if not expr.startswith('_'):\n if (index + 1) % 3 == 0:\n help.append(display)\n display = expr + \"\\t\"\n else:\n display += expr + \"\\t\"\n help.append(display)\n self.setLayout(layout)\n self.entrees.selectAll()\n self.entrees.setFocus()\n\n self.entrees.returnPressed.connect(self.evalue)\n\n\n def evalue(self):\n text = self.entrees.text()\n try:\n self.resultats.append('{0} = <b>{1}</b>'.format(text, eval(text)))\n except:\n self.resultats.append(\n \"<font color=red>Expression non-valide:</font> {}\".format(\n text))\n self.entrees.setText('')\n\n\n\n\n\nif __name__ == '__main__':\n app = Gui.QApplication([])\n calc = Calculatrice()\n calc.show()\n app.exec_()" }, { "alpha_fraction": 0.6438152194023132, "alphanum_fraction": 0.6512667536735535, "avg_line_length": 23.851852416992188, "blob_id": "a709c17007edc2cdb948f5d1b8d68b1cb7610308", "content_id": "1531c25c9b65fca64d6e45ef959eb273429896a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 671, "license_type": "permissive", "max_line_length": 91, "num_lines": 27, "path": "/experiment/drive_test_text_input.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "import subprocess\nimport threading\nimport time\nimport pyautogui\n\nclass TypewriteThread(threading.Thread):\n def __init__(self, msg, interval=0.0):\n super(TypewriteThread, self).__init__()\n self.msg = msg\n self.interval = interval\n\n\n def run(self):\n time.sleep(1.) 
# NOTE: BE SURE TO ACCOUNT FOR THIS QUARTER SECOND FOR TIMING TESTS!\n pyautogui.typewrite(self.msg, self.interval)\n\n\nw, h = pyautogui.size()\npyautogui.moveTo(w/2, h/2)\nt = TypewriteThread('Hi!\\n')\nt.start()\noutput = subprocess.check_output('pyconda test_text_input.py',\n universal_newlines=True)\n\n\n\nprint(\"captured output = \", output)\n" }, { "alpha_fraction": 0.5991649031639099, "alphanum_fraction": 0.6033402681350708, "avg_line_length": 33.238094329833984, "blob_id": "1ac906cf8a49ce58a54bce90204c0f96c28a85fa", "content_id": "fbe0ed9a106b2efad1bad93cd6e2baaffd1f93e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1437, "license_type": "permissive", "max_line_length": 81, "num_lines": 42, "path": "/pyqt_examples/text_input_dialog3.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple text input dialog'''\nfrom PyQt4 import QtGui, QtCore\n\ndef text_input(message=\"Enter your response\", default=\"\", font=None):\n app = QtGui.QApplication([])\n if font is not None:\n try:\n family, size = font\n default_font = QtGui.QFont()\n if family:\n default_font.setFamily(family)\n if size:\n default_font.setPointSize(size)\n app.setFont(default_font)\n except:\n print(\"Can not set font. Expected font = (family:str, size:int).\")\n print(\"Got font =\", font)\n\n flags = QtCore.Qt.WindowFlags()\n flags |= QtCore.Qt.FramelessWindowHint\n\n text, ok = QtGui.QInputDialog.getText(None, '',\n message, QtGui.QLineEdit.Normal, default, flags)\n app.quit()\n if ok:\n return text\n else:\n return None # I know, not needed since it is the default...\n\nif __name__ == '__main__':\n answer = text_input(message=\"What is your name?\")\n print(answer)\n answer = text_input(default=\"response\")\n print(answer)\n answer = text_input(message=\"Test set font\", font=(\"Times\", 12))\n print(answer)\n answer = text_input(message=\"Test set font size only\", font=(\"\", 12))\n print(answer)\n answer = text_input(message=\"Test set font family only\", font=(\"Courier\", 0))\n print(answer)\n answer = text_input(message=\"Intentional font error\", font=\"Helvetica\")\n print(answer)" }, { "alpha_fraction": 0.6638655662536621, "alphanum_fraction": 0.7184873819351196, "avg_line_length": 17.30769157409668, "blob_id": "767467b57d3c1de4752fc7b745bad56f059c1a20", "content_id": "e159c1edb5cce0707dbac85af9c69f1c0fb4efd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "permissive", "max_line_length": 43, "num_lines": 13, "path": "/pyqt_examples/window2.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple window with some custom values'''\nimport sys\nfrom PyQt4 import QtGui\n\napp = QtGui.QApplication(sys.argv)\n\nw = QtGui.QWidget()\nw.resize(300, 200)\nw.move(100, 100)\nw.setWindowTitle('Simple window')\nw.show()\n\nsys.exit(app.exec_())\n" }, { "alpha_fraction": 0.6302083134651184, "alphanum_fraction": 0.6319444179534912, "avg_line_length": 25.18181800842285, "blob_id": "821dadf60884556a86f3903d59704e20b6257114", "content_id": "67253c9c7401541017ae60084d25762da05d09b6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "permissive", "max_line_length": 61, "num_lines": 22, "path": "/pyqt_examples/message_box2.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "'''Simple message box with icon'''\nfrom PyQt4 
import QtGui, QtCore\n\ndef message_box(message=\"Message\", title=\"Title\", icon=None):\n    \"\"\"Simple message box.\n    \"\"\"\n    app = QtGui.QApplication([])\n    box = QtGui.QMessageBox(None)\n    if icon is not None:\n        box.setWindowIcon(QtGui.QIcon(icon))\n    else:\n        box.setWindowIcon(QtGui.QIcon(\"images/reeborg.png\"))\n    box.setWindowTitle(title)\n    box.setText(message)\n    box.show()\n    box.exec_()\n    app.quit()\n\n\nif __name__ == '__main__':\n    message_box(\"Simple test\")\n    message_box(icon=\"images/python.jpg\")\n" }, { "alpha_fraction": 0.5899306535720825, "alphanum_fraction": 0.591511607170105, "avg_line_length": 33.69620132446289, "blob_id": "4efcf3d41c634d5f2789f09c1434b24be7fbe1d0", "content_id": "f7c7a56f35205cd20b069ee6e3830499243eaa57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8223, "license_type": "permissive", "max_line_length": 79, "num_lines": 237, "path": "/experiment/bguic.py", "repo_name": "aroberge/qt_py", "src_encoding": "UTF-8", "text": "\"\"\"Basic Graphical User Interface Components\n\"\"\"\nimport os\nimport collections\nimport functools\nimport inspect\nfrom PyQt4 import QtGui, QtCore\n\nCONFIG = {'font': QtGui.QFont(),\n          'translator': QtCore.QTranslator(),\n          'locale': 'default'}\n\n\ndef with_app(func):\n    \"\"\"Intended to be used as a decorator to ensure that a single app\n       is running before the function is called, and stopped afterwards\n    \"\"\"\n\n    def _decorator(*args, **kwargs):\n        \"\"\"A single decorator would be enough to start an app before the\n           function is called. By using an inner decorator, we can quit\n           the app after the function is done.\n        \"\"\"\n        app = SimpleApp()\n        kwargs['app'] = app\n        try:\n            response = func(*args, **kwargs)\n        except TypeError:  # perhaps 'app' was not a keyword argument for func\n            sig = inspect.signature(func)\n            if 'app' in sig.parameters:\n                raise\n            else:\n                del kwargs['app']\n                response = func(*args, **kwargs)\n        app.quit()\n        return response\n\n    return functools.wraps(func)(_decorator)\n\n\ndef find_qm_files():\n    \"\"\"looking for files with names == qt_locale.qm\"\"\"\n    all_files = collections.OrderedDict()\n    for root, _, files in os.walk(os.path.join(QtGui.__file__, \"..\")):\n        for fname in files:\n            if (fname.endswith('.qm') and fname.startswith(\"qt_\")\n                    and not fname.startswith(\"qt_help\")):\n                locale = fname[3:-3]\n                all_files[locale] = root\n    return all_files\n\nQM_FILES = find_qm_files()\n\n\nclass SimpleApp(QtGui.QApplication):\n    \"\"\"A simple extension of the basic QApplication\n       with added methods useful for working with dialogs\n       that are not class based.\n    \"\"\"\n\n    def __init__(self):\n        super().__init__([])\n        self.setFont(CONFIG['font'])\n        self.set_locale(None)  # recover locale set by previous run, if any ...\n\n    def set_locale(self, locale):\n        \"\"\"Sets the language of the basic controls for PyQt\n           from a locale - provided that the corresponding qm files\n           are present in the PyQt distribution.\n        \"\"\"\n        if locale in QM_FILES:\n            if CONFIG['translator'].load(\"qt_\" + locale, QM_FILES[locale]):\n                self.installTranslator(CONFIG['translator'])\n                CONFIG['locale'] = locale\n            else:\n                print(\"language not available\")\n        elif locale == \"default\" and CONFIG['locale'] != 'default':\n            self.removeTranslator(CONFIG['translator'])\n            CONFIG['translator'] = QtCore.QTranslator()\n            CONFIG['locale'] = 'default'\n        elif CONFIG['locale'] in QM_FILES:\n            if CONFIG['translator'].load(\"qt_\" + CONFIG['locale'],\n                                         QM_FILES[CONFIG['locale']]):\n                
self.installTranslator(CONFIG['translator'])\n\n\nclass _LanguageSelector(QtGui.QDialog):\n    \"\"\"A specially constructed dialog which uses information about\n       available language (qm) files which can be used to change the\n       default language of the basic PyQt ui components.\n    \"\"\"\n\n    def __init__(self, parent, title=\"Language selection\",\n                 name=\"Language codes\",\n                 instruction=\"Click button when you are done\"):\n        super().__init__(None, QtCore.Qt.WindowSystemMenuHint |\n                         QtCore.Qt.WindowTitleHint)\n\n        self.qm_files_choices = {}\n        self.parent = parent\n        self.locale = CONFIG['locale']  # default until a check box is toggled\n\n        # ========= check boxes ==============\n        group_box = QtGui.QGroupBox(name)\n        group_box_layout = QtGui.QGridLayout()\n        for i, locale in enumerate(QM_FILES):\n            check_box = QtGui.QCheckBox(locale)\n            check_box.setAutoExclusive(True)\n            self.qm_files_choices[check_box] = locale\n            check_box.toggled.connect(self.check_box_toggled)\n            group_box_layout.addWidget(check_box, i // 4, i % 4)\n        # adding default language option. When using the PyQt distribution\n        # no \"en\" files were found and yet \"en\" was the obvious default.\n        # We need this option in case we want to revert a change.\n        check_box = QtGui.QCheckBox(\"Default\")\n        check_box.setAutoExclusive(True)\n        self.qm_files_choices[check_box] = \"default\"\n        check_box.toggled.connect(self.check_box_toggled)\n        i = len(QM_FILES)\n        group_box_layout.addWidget(check_box, i // 4, i % 4)\n        group_box.setLayout(group_box_layout)\n\n        # ========= buttons ==============\n        button_box = QtGui.QDialogButtonBox()\n        confirm_button = button_box.addButton(QtGui.QDialogButtonBox.Ok)\n        confirm_button.clicked.connect(self.confirm)\n\n        # ========= finalizing layout ====\n        main_layout = QtGui.QVBoxLayout()\n        main_layout.addWidget(group_box)\n        main_layout.addWidget(QtGui.QLabel(instruction))\n        main_layout.addWidget(button_box)\n        self.setLayout(main_layout)\n        self.setWindowTitle(title)\n\n    def check_box_toggled(self):\n        \"\"\"Callback when a checkbox is toggled\"\"\"\n        self.locale = self.qm_files_choices[self.sender()]\n\n    def confirm(self):\n        \"\"\"Callback from confirm_button used to set the locale\"\"\"\n        if self.locale != CONFIG['locale']:\n            self.parent.set_locale(self.locale)\n        self.close()\n\n\n@with_app\ndef set_global_font():\n    \"\"\"GUI component to set default font\"\"\"\n    font, ok = QtGui.QFontDialog.getFont(CONFIG['font'], None)\n    if ok:\n        CONFIG['font'] = font\n\n\n@with_app\ndef text_input(message=\"Enter your response\", title=\"Title\",\n               default_response=\"\"):\n    \"\"\"Simple text input box. 
Used to query the user and get a string back.\n \"\"\"\n flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint\n text, ok = QtGui.QInputDialog.getText(None, title, message,\n QtGui.QLineEdit.Normal,\n default_response, flags)\n if ok:\n return text\n\n\n@with_app\ndef yes_no_question(question=\"Answer this question\", title=\"Title\"):\n \"\"\"Simple yes or no question; returns True for \"Yes\", False for \"No\"\n and None for \"Cancel\".\n \"\"\"\n flags = QtGui.QMessageBox.Yes | QtGui.QMessageBox.No\n flags |= QtGui.QMessageBox.Cancel\n\n reply = QtGui.QMessageBox.question(None, title, question, flags)\n if reply == QtGui.QMessageBox.Yes:\n return True\n elif reply == QtGui.QMessageBox.No:\n return False\n\n\n@with_app\ndef select_language(title=\"Select language\", name=\"Language codes\",\n instruction=\"Click button when you are done\", app=None):\n \"\"\"Dialog to choose language based on some locale code for\n files found on default path\n \"\"\"\n selector = _LanguageSelector(app, title=title, name=name,\n instruction=instruction)\n selector.exec_()\n\n\n@with_app\ndef set_locale(locale, app=None):\n \"\"\"Sets the locale, if available\"\"\"\n app.set_locale(locale)\n\n\n@with_app\ndef message_box(message=\"Message\", title=\"Title\"):\n \"\"\"Simple message box.\"\"\"\n box = QtGui.QMessageBox(None)\n box.setWindowTitle(title)\n box.setText(message)\n box.show()\n box.exec_()\n\n\n@with_app\ndef integer_input(message=\"Choose a number\", title=\"Title\",\n default_value=1, min_=0, max_=100, step=1):\n \"\"\"Simple dialog to ask a user to select a number within a certain range\"\"\"\n flags = QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint\n number, ok = QtGui.QInputDialog.getInteger(None,\n title, message,\n default_value, min_, max_, step,\n flags)\n if ok:\n return number\n\n\ndef set_font_size(font_size):\n \"\"\"Simple method to set font size.\n \"\"\"\n try:\n CONFIG['font'].setPointSize(font_size)\n except TypeError:\n print(\"font_size must be an integer\")\n\n\nif __name__ == '__main__':\n try:\n import bguic_demo\n\n bguic_demo.guessing_game()\n except ImportError:\n print(\"Could not find demo.\")\n" } ]
27
mmilleror/narc
https://github.com/mmilleror/narc
8f056f65fdb78452dd0514fabfd608ca47fb6794
aceeb15d129c9cb4c02316ed448264273566db31
583daa573c97d57a09ca834d13c826fbb415d7d7
refs/heads/master
2020-03-15T01:59:21.941833
2018-05-02T21:00:42
2018-05-02T21:00:42
131,907,156
1
0
null
2018-05-02T21:21:25
2018-05-02T21:01:01
2018-05-02T21:01:00
null
[ { "alpha_fraction": 0.4861266613006592, "alphanum_fraction": 0.5595371127128601, "avg_line_length": 29.91153907775879, "blob_id": "0879134db6b7a4acbb22f9f8c110398528db1ba1", "content_id": "cb8d7140bf757a52ca6c7fd3dc696aa94976f591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8037, "license_type": "no_license", "max_line_length": 253, "num_lines": 260, "path": "/basejumper.py", "repo_name": "mmilleror/narc", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport glob\nimport re\nimport base64\nimport binascii\nimport zlib\nimport magic\n\n\nmimetype = magic.Magic(mime=True)\npastes_dir = '/home/ubuntu/pastes/'\t# trailing slash is IMPORTANT here\nBASE64_REGEX = re.compile('TV(oAAA|pBUl|pQAA|qAAA|qQAA|roAA)[A-Za-z0-9/+]{112,}[\\=]{0,2}')\nB64URLSAFE_REGEX = re.compile('TV(oAAA|pBUl|pQAA|qAAA|qQAA|roAA)[A-Za-z0-9_-]{112,}[\\=]{0,2}')\nDECSP_REGEX = re.compile('77\\ 90\\ (144\\ 0\\ 3\\ 0\\ 4\\ 0|232\\ 0\\ 0\\ 0\\ 0\\ 91|144\\ 0\\ 3\\ 0\\ 0\\ 0|80\\ 0\\ 2\\ 0\\ 0\\ 0|0\\ 0\\ 0\\ 0\\ 0\\ 0|65\\ 82\\ 85\\ 72\\ 137\\ 229|128\\ 0\\ 1\\ 0\\ 0\\ 0|144\\ 0\\ 3\\ 0\\ 4\\ 0|232\\ 0\\ 0\\ 0\\ 0\\ 91)[0-9\\ ]{254,}')\nDECCM_REGEX = re.compile('77,90,(144,0,3,0,4,0|232,0,0,0,0,91|144,0,3,0,0,0|80,0,2,0,0,0|0,0,0,0,0,0|65,82,85,72,137,229|128,0,1,0,0,0|144,0,3,0,4,0|232,0,0,0,0,91)[0-9,]{254,}[0-9]{1}')\nHEX_REGEX = re.compile('4d5a(00000000|41525548|50000200|80000100|90000300|e8000000)[a-f0-9]{254,}', re.IGNORECASE)\nHEXBASE_REGEX = re.compile('5456(71514141|70514141|6f414141|7042556c|71414141|726f4141)[a-f0-9]{254,}', re.IGNORECASE)\nBIN_REGEX = re.compile('0100110101011010(00000000000000000000000000000000|01000001010100100101010101001000|01010000000000000000001000000000|10000000000000000000000100000000|10010000000000000000001100000000|11101000000000000000000000000000)[0-1]{1000,}')\nGZ64_REGEX = re.compile('(H4sIA|tVVLb)[A-Za-z0-9/+]{252,}[\\=]{0,2}')\n\n\ndef decdump(text):\n if DECSP_REGEX.search(text):\n print(\"decimal matched\")\n match = DECSP_REGEX.search(text)\n try:\n elements = match.group(0).split(' ')\n frame = bytearray()\n for byte in elements:\n decimal = int(byte, 10)\n frame.append(decimal)\n bin = str(frame)\n return bin\n except:\n print(\"Error decoding decimal\")\n bin = \"ERR\"\n return bin\n elif DECCM_REGEX.search(text):\n print(\"decimal-comma matched\")\n match = DECCM_REGEX.search(text)\n try:\n elements = match.group(0).split(',')\n frame = bytearray()\n for byte in elements:\n decimal = int(byte, 10)\n frame.append(decimal)\n bin = str(frame)\n return bin\n except:\n print(\"Error decoding decimal\")\n bin = \"ERR\"\n return bin\n else:\n print(\"No decimal string found\")\n bin = \"ERR\"\n return bin\n\n\ndef bindump(text):\n if BIN_REGEX.search(text):\n print(\"binary matched\")\n match = BIN_REGEX.search(text)\n try:\n n = int(str('0b' + match.group(0)), 2)\n bin = binascii.unhexlify('%x' % n)\n return bin\n except:\n print(\"Error decoding binary\")\n bin = \"ERR\"\n return bin\n else:\n print(\"No binary string found\")\n bin = \"ERR\"\n return bin\n\n\ndef basedump(text):\n if BASE64_REGEX.search(text):\n print(\"base64 matched\")\n match = BASE64_REGEX.search(text)\n try:\n bin = base64.b64decode(match.group(0))\n return bin\n except:\n print(\"Error decoding base64\")\n bin = \"ERR\"\n return bin\n if B64URLSAFE_REGEX.search(text):\n print(\"urlsafe matched\")\n match = B64URLSAFE_REGEX.search(text)\n try:\n bin = base64.urlsafe_b64decode(match.group(0))\n return bin\n except:\n print(\"Error 
decoding urlsafe\")\n            bin = \"ERR\"\n            return bin\n    else:\n        print(\"No base64 string found\")\n        bin = \"ERR\"\n        return bin\n\n\ndef hexdump(text):\n    if HEX_REGEX.search(text):\n        print(\"hex matched\")\n        match = HEX_REGEX.search(text)\n        try:\n            bin = match.group(0).decode('hex')\n            return bin\n        except:\n            print(\"Error decoding hex\")\n            bin = \"ERR\"\n            return bin\n    elif HEXBASE_REGEX.search(text):\n        print(\"hexbase matched\")\n        match = HEXBASE_REGEX.search(text)\n        try:\n            bin = match.group(0).decode('hex')\n            return bin\n        except:\n            print(\"Error decoding hex\")\n            bin = \"ERR\"\n            return bin\n    else:\n        print(\"No hex string found\")\n        bin = \"ERR\"\n        return bin\n\n\ndef gz64dump(text):\n    if GZ64_REGEX.search(text):\n        print(\"basegzip matched\")\n        match = GZ64_REGEX.search(text)\n        frame = bytearray()\n        try:\n            for a in base64.b64decode(text):\n                frame.append(a)\n        except binascii.Error:\n            print(\"Error decoding base64\")\n            bin = \"ERR\"\n        try:\n            bin = zlib.decompress(bytes(frame), 15+32)\n        except zlib.error:\n            print(\"Error decompressing\")\n            bin = \"ERR\"\n        if mimetype.from_buffer(bin) == 'application/x-dosexec':\n            return bin\n        else:\n            print(\"Error, not PE file\")\n            bin = \"ERR\"\n            return bin\n\n\ndef write_file(data, filename):\n    if not os.path.exists(filename):\n        file = open(filename, 'w')\n        file.write(data)\n        file.close()\n        return\n    else:\n        print(\"paste already exists\")\n\n\nls = pastes_dir + '*.bin'\nbinlist = glob.glob(ls)\nfor filename in binlist:\n    print(filename)\n    raw=open(filename).readlines()\n    for n,line in enumerate(raw):\n        raw[n]=line.rstrip()\n        raw[n]=raw[n].replace(\" \", \"\")\n    raw = ''.join(raw)\n    bin = bindump(raw)\n    if not (bin == 'ERR'):\n        base = os.path.basename(filename)\n        binout = pastes_dir + os.path.splitext(base)[0] + '.exe'\n        write_file(bin, binout)\n    os.remove(filename)\n\nls = pastes_dir + '*.base64'\nbaselist = glob.glob(ls)\nfor filename in baselist:\n    print filename\n    raw=open(filename).readlines()\n    for n,line in enumerate(raw):\n        raw[n]=line.rstrip()\n    raw = ''.join(raw)\n    bin = basedump(raw)\n    if not (bin == 'ERR'):\n        base = os.path.basename(filename)\n        binout = pastes_dir + os.path.splitext(base)[0] + '.exe'\n        write_file(bin, binout)\n    os.remove(filename)\n\nls = pastes_dir + '*.hex'\nhexlist = glob.glob(ls)\nfor filename in hexlist:\n    print filename\n    raw=open(filename).readlines()\n    for n,line in enumerate(raw):\n        raw[n]=line.rstrip()\n        raw[n]=raw[n].replace(\" \", \"\")\n    raw = ''.join(raw)\n    bin = hexdump(raw)\n    if not (bin == 'ERR'):\n        base = os.path.basename(filename)\n        binout = pastes_dir + os.path.splitext(base)[0] + '.exe'\n        write_file(bin, binout)\n    os.remove(filename)\n\nls = pastes_dir + '*.hexbase'\nhexblist = glob.glob(ls)\nfor filename in hexblist:\n    print filename\n    raw=open(filename).readlines()\n    for n,line in enumerate(raw):\n        raw[n]=line.rstrip()\n        raw[n]=raw[n].replace(\" \", \"\")\n        raw[n]=raw[n].replace(\"#\", \"A\")\n    raw = ''.join(raw)\n    bin = hexdump(raw)\n    if not (bin == 'ERR'):\n        bin = basedump(bin)\n        if not (bin == 'ERR'):\n            base = os.path.basename(filename)\n            binout = pastes_dir + os.path.splitext(base)[0] + '.exe'\n            write_file(bin, binout)\n    os.remove(filename)\n\nls = pastes_dir + '*.dec'\ndeclist = glob.glob(ls)\nfor filename in declist:\n    print filename\n    raw=open(filename).readlines()\n    for n,line in enumerate(raw):\n        raw[n]=line.rstrip()\n    raw = ''.join(raw)\n    bin = decdump(raw)\n    if not (bin == 'ERR'):\n        base = os.path.basename(filename)\n        binout = pastes_dir + os.path.splitext(base)[0] + '.exe'\n        write_file(bin, binout)\n        
os.remove(filename)\n\nls = pastes_dir + '*.basegzip'\nbaselist = glob.glob(ls)\nfor filename in baselist:\n print filename\n raw=open(filename).readlines()\n for n,line in enumerate(raw):\n raw[n]=line.rstrip()\n raw = ''.join(raw)\n bin = gz64dump(raw)\n if not (bin == 'ERR'):\n base = os.path.basename(filename)\n binout = pastes_dir + os.path.splitext(base)[0] + '.exe'\n write_file(bin, binout)\n os.remove(filename)\n" }, { "alpha_fraction": 0.46143046021461487, "alphanum_fraction": 0.6256607174873352, "avg_line_length": 36.48606872558594, "blob_id": "fb7dc8ace9aa509ea14ae70b1a765ea964020e36", "content_id": "ef085a4893ae3956076a95fc505b9aab9cbce713", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24216, "license_type": "no_license", "max_line_length": 161, "num_lines": 646, "path": "/pasteater.py", "repo_name": "mmilleror/narc", "src_encoding": "UTF-8", "text": "import requests\nimport os\nimport time\n\nstart = time.time()\nurl_pastebin_scraping = 'https://scrape.pastebin.com/api_scraping.php'\nlimit = 250\npastes_dir = '/home/ubuntu/pastes/' # Trailing slash is important here!\noriginals_dir = '/home/ubuntu/pastes/origraw/' # Trailing slash is important here!\nlogfile = pastes_dir + 'pastes.log'\n\n\ndef posh_find(text):\n if \"[System.Convert]::\" in text:\n return True\n if \"FromBase64String(\" in text:\n return True\n if \"New-Object System.IO.\" in text:\n return True\n if \"[System.Net.\" in text:\n return True\n if \"System.Reflection.AssemblyName\" in text:\n return True\n\n\ndef dec_find(text):\n if '77 90 144 0 3 0 4 0' in text:\n return True\n if '77 90 232 0 0 0 0 91' in text:\n return True\n if '77 90 144 0 3 0 0 0' in text:\n return True\n if '77 90 80 0 2 0 0 0' in text:\n return True\n if '77 90 0 0 0 0 0 0' in text:\n return True\n if '77 90 65 82 85 72 137 229' in text:\n return True\n if '77 90 128 0 1 0 0 0' in text:\n return True\n if '77,90,144,0,3,0,4,0,' in text:\n return True\n if '77,90,232,0,0,0,0,91,' in text:\n return True\n if '77,90,144,0,3,0,0,0,' in text:\n return True\n if '77,90,80,0,2,0,0,0,' in text:\n return True\n if '77,90,0,0,0,0,0,0,' in text:\n return True\n if '77,90,65,82,85,72,137,229,' in text:\n return True\n if '77,90,128,0,1,0,0,0,' in text:\n return True\n if '77, 90, 144, 0, 3, 0, 4, 0,' in text:\n return True\n if '77, 90, 232, 0, 0, 0, 0, 91,' in text:\n return True\n if '77, 90, 144, 0, 3, 0, 0, 0,' in text:\n return True\n if '77, 90, 80, 0, 2, 0, 0, 0,' in text:\n return True\n if '77, 90, 0, 0, 0, 0, 0, 0,' in text:\n return True\n if '77, 90, 65, 82, 85, 72, 137, 229,' in text:\n return True\n if '77, 90, 128, 0, 1, 0, 0, 0,' in text:\n return True\n\n\ndef bin_find(text):\n if '010011010101101000000000000000000000000000000000' in text:\n return True\n if '010011010101101001000001010100100101010101001000' in text:\n return True\n if '010011010101101001010000000000000000001000000000' in text:\n return True\n if '010011010101101010000000000000000000000100000000' in text:\n return True\n if '010011010101101010010000000000000000001100000000' in text:\n return True\n if '010011010101101011101000000000000000000000000000' in text:\n return True\n if '0100 1101 0101 1010 0000 0000 0000 0000 0000 0000 0000 0000' in text:\n return True\n if '0100 1101 0101 1010 0100 0001 0101 0010 0101 0101 0100 1000' in text:\n return True\n if '0100 1101 0101 1010 0101 0000 0000 0000 0000 0010 0000 0000' in text:\n return True\n if '0100 1101 0101 1010 1000 0000 0000 0000 0000 0001 0000 0000' 
in text:\n return True\n if '0100 1101 0101 1010 1001 0000 0000 0000 0000 0011 0000 0000' in text:\n return True\n if '0100 1101 0101 1010 1110 1000 0000 0000 0000 0000 0000 0000' in text:\n return True\n if '01 00 11 01 01 01 10 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00' in text:\n return True\n if '01 00 11 01 01 01 10 10 01 00 00 01 01 01 00 10 01 01 01 01 01 00 10 00' in text:\n return True\n if '01 00 11 01 01 01 10 10 01 01 00 00 00 00 00 00 00 00 00 10 00 00 00 00' in text:\n return True\n if '01 00 11 01 01 01 10 10 10 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00' in text:\n return True\n if '01 00 11 01 01 01 10 10 10 01 00 00 00 00 00 00 00 00 00 11 00 00 00 00' in text:\n return True\n if '01 00 11 01 01 01 10 10 11 10 10 00 00 00 00 00 00 00 00 00 00 00 00 00' in text:\n return True\n\n\ndef b64_find(text):\n if 'TVqQAAMAAAAEAAAA' in text:\n return True\n if 'TVpQAAIAAAAEAA8A' in text:\n return True\n if 'TVoAAAAAAAAAAAAA' in text:\n return True\n if 'TVpBUlVIieVIgewg' in text:\n return True\n if 'TVqAAAEAAAAEABAA' in text:\n return True\n if 'TVroAAAAAFtSRVWJ' in text:\n return True\n if 'TVqQAAMABAAAAAAA' in text:\n return True\n\n\ndef doublebase_find(text):\n if 'VFZxUUFBTUFBQUFFQUFBQ' in text:\n return True\n if 'VFZwUUFBSUFBQUFFQUE4Q' in text:\n return True\n if 'VFZvQUFBQUFBQUFBQUFBQ' in text:\n return True\n if 'VFZwQlVsVklpZVZJZ2V3Z' in text:\n return True\n if 'VFZxQUFBRUFBQUFFQUJBQ' in text:\n return True\n if 'VFZyb0FBQUFBRnRTUlZXS' in text:\n return True\n if 'VFZxUUFBTUFCQUFBQUFBQ' in text:\n return True\n\n\ndef hex_find(text):\n if '4d5a900003000000' in text:\n return True\n if '4D5A900003000000' in text:\n return True\n if '4d5a500002000000' in text:\n return True\n if '4D5A500002000000' in text:\n return True\n if '4d5a000000000000' in text:\n return True\n if '4D5A000000000000' in text:\n return True\n if '4d5a4152554889e5' in text:\n return True\n if '4D5A4152554889E5' in text:\n return True\n if '4d5a800001000000' in text:\n return True\n if '4D5A800001000000' in text:\n return True\n if '4d5a900003000400' in text:\n return True\n if '4D5A900003000400' in text:\n return True\n if '4d5ae8000000005b' in text:\n return True\n if '4D5AE8000000005B' in text:\n return True\n if '4d 5a 90 00 03 00 04 00' in text:\n return True\n if '4D 5A 90 00 03 00 04 00' in text:\n return True\n if '4d 5a e8 00 00 00 00 5b' in text:\n return True\n if '4D 5A E8 00 00 00 00 5B' in text:\n return True\n if '4d 5a 90 00 03 00 00 00' in text:\n return True\n if '4D 5A 90 00 03 00 00 00' in text:\n return True\n if '4d 5a 50 00 02 00 00 00' in text:\n return True\n if '4D 5A 50 00 02 00 00 00' in text:\n return True\n if '4d 5a 00 00 00 00 00 00' in text:\n return True\n if '4D 5A 00 00 00 00 00 00' in text:\n return True\n if '4d 5a 41 52 55 48 89 e5' in text:\n return True\n if '4D 5A 41 52 55 48 89 E5' in text:\n return True\n if '4d 5a 80 00 01 00 00 00' in text:\n return True\n if '4D 5A 80 00 01 00 00 00' in text:\n return True\n if '4d 5a 90 00 03 00 04 00' in text:\n return True\n if '4D 5A 90 00 03 00 04 00' in text:\n return True\n if '4d 5a e8 00 00 00 00 5b' in text:\n return True\n if '4D 5A E8 00 00 00 00 5B' in text:\n return True\n if '0x4d,0x5a,0x90,0x00,0x03,0x00,0x04,0x00' in text:\n return True\n if '0x4D,0x5A,0x90,0x00,0x03,0x00,0x04,0x00' in text:\n return True\n if '0x4d,0x5a,0xe8,0x00,0x00,0x00,0x00,0x5b' in text:\n return True\n if '0x4D,0x5A,0xE8,0x00,0x00,0x00,0x00,0x5B' in text:\n return True\n if '0x4d,0x5a,0x90,0x00,0x03,0x00,0x00,0x00' in 
text:\n return True\n if '0x4D,0x5A,0x90,0x00,0x03,0x00,0x00,0x00' in text:\n return True\n if '0x4d,0x5a,0x50,0x00,0x02,0x00,0x00,0x00' in text:\n return True\n if '0x4D,0x5A,0x50,0x00,0x02,0x00,0x00,0x00' in text:\n return True\n if '0x4d,0x5a,0x00,0x00,0x00,0x00,0x00,0x00' in text:\n return True\n if '0x4D,0x5A,0x00,0x00,0x00,0x00,0x00,0x00' in text:\n return True\n if '0x4d,0x5a,0x41,0x52,0x55,0x48,0x89,0xe5' in text:\n return True\n if '0x4D,0x5A,0x41,0x52,0x55,0x48,0x89,0xE5' in text:\n return True\n if '0x4d,0x5a,0x80,0x00,0x01,0x00,0x00,0x00' in text:\n return True\n if '0x4D,0x5A,0x80,0x00,0x01,0x00,0x00,0x00' in text:\n return True\n if '0x4d,0x5a,0x90,0x00,0x03,0x00,0x04,0x00' in text:\n return True\n if '0x4D,0x5A,0x90,0x00,0x03,0x00,0x04,0x00' in text:\n return True\n if '0x4d,0x5a,0xe8,0x00,0x00,0x00,0x00,0x5b' in text:\n return True\n if '0x4D,0x5A,0xE8,0x00,0x00,0x00,0x00,0x5B' in text:\n return True\n\n\ndef hexbase_find(text):\n if '5456715141414d414141414541414141' in text:\n return True\n if '5456715141414D414141414541414141' in text:\n return True\n if '54 56 71 51 41 41 4d 41 41 41 41 45 41 41 41 41' in text:\n return True\n if '54 56 71 51 41 41 4D 41 41 41 41 45 41 41 41 41' in text:\n return True\n if '54567051414149414141414541413841' in text:\n return True\n if '54 56 70 51 41 41 49 41 41 41 41 45 41 41 38 41' in text:\n return True\n if '54566f41414141414141414141414141' in text:\n return True\n if '54566F41414141414141414141414141' in text:\n return True\n if '54 56 6f 41 41 41 41 41 41 41 41 41 41 41 41 41' in text:\n return True\n if '54 56 6F 41 41 41 41 41 41 41 41 41 41 41 41 41' in text:\n return True\n if '54567042556c56496965564967657767' in text:\n return True\n if '54567042556C56496965564967657767' in text:\n return True\n if '54 56 70 42 55 6c 56 49 69 65 56 49 67 65 77 67' in text:\n return True\n if '54 56 70 42 55 6C 56 49 69 65 56 49 67 65 77 67' in text:\n return True\n if '54567141414145414141414541424141' in text:\n return True\n if '54 56 71 41 41 41 45 41 41 41 41 45 41 42 41 41' in text:\n return True\n if '5456726f41414141414674535256574a' in text:\n return True\n if '5456726F41414141414674535256574A' in text:\n return True\n if '54 56 72 6f 41 41 41 41 41 46 74 53 52 56 57 4a' in text:\n return True\n if '54 56 72 6F 41 41 41 41 41 46 74 53 52 56 57 4A' in text:\n return True\n if '5456715141414d414241414141414141' in text:\n return True\n if '5456715141414D414241414141414141' in text:\n return True\n if '54 56 71 51 41 41 4d 41 42 41 41 41 41 41 41 41' in text:\n return True\n if '54 56 71 51 41 41 4D 41 42 41 41 41 41 41 41 41' in text:\n return True\n\n\ndef basehex_find(text):\n if 'NGQ1YTkwMDAwMzAwMDAwMA' in text:\n return True\n if 'NEQ1QTkwMDAwMzAwMDAwMA' in text:\n return True\n if 'NGQ1YTUwMDAwMjAwMDAwMA' in text:\n return True\n if 'NEQ1QTUwMDAwMjAwMDAwMA' in text:\n return True\n if 'NGQ1YTAwMDAwMDAwMDAwMA' in text:\n return True\n if 'NEQ1QTAwMDAwMDAwMDAwMA' in text:\n return True\n if 'NGQ1YTQxNTI1NTQ4ODllNQ' in text:\n return True\n if 'NEQ1QTQxNTI1NTQ4ODlFNQ' in text:\n return True\n if 'NGQ1YTgwMDAwMTAwMDAwMA' in text:\n return True\n if 'NEQ1QTgwMDAwMTAwMDAwMA' in text:\n return True\n if 'NGQ1YTkwMDAwMzAwMDQwMA' in text:\n return True\n if 'NEQ1QTkwMDAwMzAwMDQwMA' in text:\n return True\n if 'NGQ1YWU4MDAwMDAwMDA1Yg' in text:\n return True\n if 'NEQ1QUU4MDAwMDAwMDA1Qg' in text:\n return True\n if 'NGQgNWEgOTAgMDAgMDMgMDAgMDQgMDA' in text:\n return True\n if 'NEQgNUEgOTAgMDAgMDMgMDAgMDQgMDA' in text:\n return True\n if 
'NGQgNWEgZTggMDAgMDAgMDAgMDAgNWI' in text:\n return True\n if 'NEQgNUEgRTggMDAgMDAgMDAgMDAgNUI' in text:\n return True\n if 'NGQgNWEgOTAgMDAgMDMgMDAgMDAgMDA' in text:\n return True\n if 'NEQgNUEgOTAgMDAgMDMgMDAgMDAgMDA' in text:\n return True\n if 'NGQgNWEgNTAgMDAgMDIgMDAgMDAgMDA' in text:\n return True\n if 'NEQgNUEgNTAgMDAgMDIgMDAgMDAgMDA' in text:\n return True\n if 'NGQgNWEgMDAgMDAgMDAgMDAgMDAgMDA' in text:\n return True\n if 'NEQgNUEgMDAgMDAgMDAgMDAgMDAgMDA' in text:\n return True\n if 'NGQgNWEgNDEgNTIgNTUgNDggODkgZTU' in text:\n return True\n if 'NEQgNUEgNDEgNTIgNTUgNDggODkgRTU' in text:\n return True\n if 'NGQgNWEgODAgMDAgMDEgMDAgMDAgMDA' in text:\n return True\n if 'NEQgNUEgODAgMDAgMDEgMDAgMDAgMDA' in text:\n return True\n if 'NGQgNWEgOTAgMDAgMDMgMDAgMDQgMDA' in text:\n return True\n if 'NEQgNUEgOTAgMDAgMDMgMDAgMDQgMDA' in text:\n return True\n if 'NGQgNWEgZTggMDAgMDAgMDAgMDAgNWI' in text:\n return True\n if 'NEQgNUEgRTggMDAgMDAgMDAgMDAgNUI' in text:\n return True\n if 'MHg0ZCwweDVhLDB4OTAsMHgwMCwweDAzLDB4MDAsMHgwNCwweDAwCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4OTAsMHgwMCwweDAzLDB4MDAsMHgwNCwweDAwCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4ZTgsMHgwMCwweDAwLDB4MDAsMHgwMCwweDViCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4RTgsMHgwMCwweDAwLDB4MDAsMHgwMCwweDVCCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4OTAsMHgwMCwweDAzLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4OTAsMHgwMCwweDAzLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4NTAsMHgwMCwweDAyLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4NTAsMHgwMCwweDAyLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4MDAsMHgwMCwweDAwLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4NDEsMHg1MiwweDU1LDB4NDgsMHg4OSwweGU1Cg' in text:\n return True\n if 'MHg0RCwweDVBLDB4NDEsMHg1MiwweDU1LDB4NDgsMHg4OSwweEU1Cg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4ODAsMHgwMCwweDAxLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4ODAsMHgwMCwweDAxLDB4MDAsMHgwMCwweDAwCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4OTAsMHgwMCwweDAzLDB4MDAsMHgwNCwweDAwCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4OTAsMHgwMCwweDAzLDB4MDAsMHgwNCwweDAwCg' in text:\n return True\n if 'MHg0ZCwweDVhLDB4ZTgsMHgwMCwweDAwLDB4MDAsMHgwMCwweDViCg' in text:\n return True\n if 'MHg0RCwweDVBLDB4RTgsMHgwMCwweDAwLDB4MDAsMHgwMCwweDVCCg' in text:\n return True\n\n\ndef hexbin_find(text):\n if '303130303131303130313031313031303030303030303030303030303030303030303030303030303030303030303030' in text:\n return True\n if '303130303131303130313031313031303031303030303031303130313030313030313031303130313031303031303030' in text:\n return True\n if '303130303131303130313031313031303031303130303030303030303030303030303030303031303030303030303030' in text:\n return True\n if '303130303131303130313031313031303130303030303030303030303030303030303030303030313030303030303030' in text:\n return True\n if '303130303131303130313031313031303130303130303030303030303030303030303030303031313030303030303030' in text:\n return True\n if '303130303131303130313031313031303131313031303030303030303030303030303030303030303030303030303030' in text:\n return True\n if '30 31 30 30 31 31 30 31 30 31 30 31 31 30 31 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30' in text:\n return True\n if '30 31 30 30 31 31 30 31 30 31 30 31 31 30 31 30 30 31 30 30 30 30 
30 31 30 31 30 31 30 30 31 30 30 31 30 31 30 31 30 31 30 31 30 30 31 30 30 30' in text:\n return True\n if '30 31 30 30 31 31 30 31 30 31 30 31 31 30 31 30 30 31 30 31 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 31 30 30 30 30 30 30 30 30 30' in text:\n return True\n if '30 31 30 30 31 31 30 31 30 31 30 31 31 30 31 30 31 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 31 30 30 30 30 30 30 30 30' in text:\n return True\n if '30 31 30 30 31 31 30 31 30 31 30 31 31 30 31 30 31 30 30 31 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 31 31 30 30 30 30 30 30 30 30' in text:\n return True\n if '30 31 30 30 31 31 30 31 30 31 30 31 31 30 31 30 31 31 31 30 31 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30' in text:\n return True\n\n\ndef basebin_find(text):\n if 'MDEwMDExMDEwMTAxMTAxMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw' in text:\n return True\n if 'MDEwMDExMDEwMTAxMTAxMDAxMDAwMDAxMDEwMTAwMTAwMTAxMDEwMTAxMDAxMDAw' in text:\n return True\n if 'MDEwMDExMDEwMTAxMTAxMDAxMDEwMDAwMDAwMDAwMDAwMDAwMDAxMDAwMDAwMDAw' in text:\n return True\n if 'MDEwMDExMDEwMTAxMTAxMDEwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMTAwMDAwMDAw' in text:\n return True\n if 'MDEwMDExMDEwMTAxMTAxMDEwMDEwMDAwMDAwMDAwMDAwMDAwMDAxMTAwMDAwMDAw' in text:\n return True\n if 'MDEwMDExMDEwMTAxMTAxMDExMTAxMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAw' in text:\n return True\n if 'MDEwMCAxMTAxIDAxMDEgMTAxMCAwMDAwIDAwMDAgMDAwMCAwMDAwIDAwMDAgMDAwMCAwMDAwIDAw' in text:\n return True\n if 'MDEwMCAxMTAxIDAxMDEgMTAxMCAwMTAwIDAwMDEgMDEwMSAwMDEwIDAxMDEgMDEwMSAwMTAwIDEw' in text:\n return True\n if 'MDEwMCAxMTAxIDAxMDEgMTAxMCAwMTAxIDAwMDAgMDAwMCAwMDAwIDAwMDAgMDAxMCAwMDAwIDAw' in text:\n return True\n if 'MDEwMCAxMTAxIDAxMDEgMTAxMCAxMDAwIDAwMDAgMDAwMCAwMDAwIDAwMDAgMDAwMSAwMDAwIDAw' in text:\n return True\n if 'MDEwMCAxMTAxIDAxMDEgMTAxMCAxMDAxIDAwMDAgMDAwMCAwMDAwIDAwMDAgMDAxMSAwMDAwIDAw' in text:\n return True\n if 'MDEwMCAxMTAxIDAxMDEgMTAxMCAxMTEwIDEwMDAgMDAwMCAwMDAwIDAwMDAgMDAwMCAwMDAwIDAw' in text:\n return True\n if 'MDEgMDAgMTEgMDEgMDEgMDEgMTAgMTAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMD' in text:\n return True\n if 'MDEgMDAgMTEgMDEgMDEgMDEgMTAgMTAgMDEgMDAgMDAgMDEgMDEgMDEgMDAgMTAgMDEgMDEgMDEgMDEgMDEgMDAgMTAgMD' in text:\n return True\n if 'MDEgMDAgMTEgMDEgMDEgMDEgMTAgMTAgMDEgMDEgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMTAgMDAgMDAgMDAgMD' in text:\n return True\n if 'MDEgMDAgMTEgMDEgMDEgMDEgMTAgMTAgMTAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDEgMDAgMDAgMDAgMD' in text:\n return True\n if 'MDEgMDAgMTEgMDEgMDEgMDEgMTAgMTAgMTAgMDEgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMTEgMDAgMDAgMDAgMD' in text:\n return True\n if 'MDEgMDAgMTEgMDEgMDEgMDEgMTAgMTAgMTEgMTAgMTAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMDAgMD' in text:\n return True\n\n\ndef basegzip_find(text):\n if 'tVVLb' in text:\n return True\n if 'H4sIA' in text:\n return True\n\n\ndef save_file(text, type, key):\n print('%s: %s' % (type, key))\n outfile = pastes_dir + key + \".\" + type\n if not os.path.exists(outfile):\n file = open(outfile, 'w')\n file.write(text)\n file.close()\n return\n else:\n print(\"paste already exists: \" + outfile)\n return\n\n\ndef save_raw(text, key):\n rawfile = originals_dir + key\n if not os.path.exists(rawfile):\n file = open(rawfile, 'w')\n file.write(text)\n file.close()\n return\n else:\n print(\"paste already exists: \" + rawfile)\n return\n\n\nparams = {'limit': limit}\nr = requests.get(url_pastebin_scraping, params)\nresponse = r.json()\n\nlogfile = open(logfile, 'a+')\ncounter = 
0\nbytes = 0\nfor paste in response:\n title = paste[\"title\"]\n type = paste[\"syntax\"]\n expire = paste[\"expire\"]\n user = paste[\"user\"]\n key = paste[\"key\"]\n date = paste[\"date\"]\n size = int(paste[\"size\"])\n if (type == 'text' and size > 5000):\n counter += 1\n bytes += size\n url = paste[\"scrape_url\"]\n r = requests.get(url)\n if b64_find(r.content):\n type = \"base64\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if hex_find(r.content):\n type = \"hex\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if bin_find(r.content):\n type = \"bin\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if hexbase_find(r.content):\n type = \"hexbase\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if dec_find(r.content):\n type = \"dec\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if posh_find(r.content):\n type = \"posh\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if doublebase_find(r.content):\n type = \"2xbase64\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if b64_find(r.content[::-1]):\n type = \"base64\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if hex_find(r.content[::-1]):\n type = \"hex\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if bin_find(r.content[::-1]):\n type = \"bin\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if hexbase_find(r.content[::-1]):\n type = \"hexbase\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if dec_find(r.content[::-1]):\n type = \"dec\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if doublebase_find(r.content[::-1]):\n type = \"2xbase64\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n\n# EXPERIMENTAL DETECTION BELOW THIS LINE\n\n if hexbin_find(r.content):\n type = \"hexbin\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if hexbin_find(r.content[::-1]):\n type = \"hexbin\"\n save_file(r.content[::-1], type, key)\n save_raw(r.content, key)\n logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n break\n if basebin_find(r.content):\n type = \"basebin\"\n save_file(r.content, type, key)\n save_raw(r.content, key)\n 
logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n        break\n    if basebin_find(r.content[::-1]):\n        type = \"basebin\"\n        save_file(r.content[::-1], type, key)\n        save_raw(r.content, key)\n        
logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n        break\n    if basehex_find(r.content):\n        type = \"basehex\"\n        save_file(r.content, type, key)\n        save_raw(r.content, key)\n        logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n        break\n    if basehex_find(r.content[::-1]):\n        type = \"basehex\"\n        save_file(r.content[::-1], type, key)\n        save_raw(r.content, key)\n        logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n        break\n    if basegzip_find(r.content):\n        type = \"basegzip\"\n        save_file(r.content, type, key)\n        save_raw(r.content, key)\n        logfile.write('%s,%s,%s,%s,%s,%s\\n' % (type, key, title, user, date, expire))\n        break\n\nend = time.time()\nprint(\"documents read: \" + str(counter))\nprint(\"bytes read: \" + str(bytes))\nprint(\"run time: \" + str(end - start))\n" }, { "alpha_fraction": 0.5467900037765503, "alphanum_fraction": 0.5663765072822571, "avg_line_length": 29.296703338623047, "blob_id": "c43e675df8f5a71709362518e55eb94c43da6173", "content_id": "9f49e275bc49059d07587964499ea964976f591"-placeholder-removed "content_id": "9f49e275bc49059d07587964499ea"
sys.exit(1)\n\n\ndef vt_upload(file):\n    params = {'apikey': vti_api_key}\n    files = {'file': (file, open(file, 'rb'))}\n    r = requests.post(vti_upload_url, files=files, params=params)\n    response = r.json()\n\ndef vt_comment(comment,md5):\n    params = {\"resource\": md5,\n              \"comment\": comment,\n              \"apikey\": vti_api_key}\n    data = urllib.urlencode(params)\n    req = urllib2.Request(vti_comment_url, data)\n    r = urllib2.urlopen(req)\n\n\ndef isip(string):\n    if IPV4_REGEX.search(string) and string != \"127.0.0.1\":\n        return True\n\n\ndef getipinfo(ipaddr):\n    if isip(ipaddr):\n        url = ipinfo_url + ipaddr\n        r = requests.get(url)\n        if r.status_code == 200:\n            response = r.json()\n            loc = response[\"loc\"]\n            city = response[\"city\"]\n            region = response[\"region\"]\n            try:\n                hostname = response[\"hostname\"]\n            except:\n                hostname = ''\n            country = response[\"country\"]\n            org = response[\"org\"]\n            try:\n                postal = response[\"postal\"]\n            except:\n                postal = ''\n            return loc,city,region,hostname,country,org,postal\n        else:\n            print \"Problem connecting to: \" + url\n            sys.exit(1)\n    else:\n        print ipaddr + ' is not an IP address'\n        sys.exit(1)\n\n\nls = pastes_dir + '*.exe'\nlogfile = open(logfile, 'a+')\nexelist = glob.glob(ls)\nfor filename in exelist:\n    base = os.path.basename(filename)\n    paste = os.path.splitext(base)[0]\n    type,hash,c2 = BAMFrun(filename)\n    stored_file = done_dir + base + \"_\" + hash\n    if not os.path.isfile(stored_file) and not (type == 'None'):\n        vt_upload(filename)\n        c2safe = c2.replace(\".\", \"[.]\")\n        message = type + \" found at https://pastebin.com/\" + paste + \" SHA256: \" + hash + \" C2: \" + c2safe\n        if len(message) > 280:\n            message = message[:280]\n        tweet(message)\n        time.sleep(30)\n        comment = type + \" found at https://pastebin.com/\" + paste + \" SHA256: \" + hash + \" C2: \" + c2\n        vt_comment(comment,hash)\n        new_filename = stored_file\n        os.rename(filename, new_filename)\n        for a in c2.split(','):\n            try:\n                ipaddr = IPV4_REGEX.search(a).group(0)\n                fqdn = \"\"\n            except:\n                ipaddr = \"err\"\n            if ipaddr == \"err\":\n                try:\n                    fqdn = DOMAIN_REGEX.search(a).group(0)\n                except:\n                    fqdn = \"err\"\n                if fqdn != \"err\":\n                    try:\n                        ipaddr = str(myResolver.query(fqdn, 'A')[0])\n                    except:\n                        ipaddr = \"err\"\n            loc = \"err\"\n            city = \"err\"\n            region = \"err\"\n            hostname = \"err\"\n            country = \"err\"\n            org = \"err\"\n            postal = \"err\"\n            if ipaddr != \"err\":\n                loc,city,region,hostname,country,org,postal = getipinfo(ipaddr)\n            logentry = {\n                'paste':str(paste),\n                'hash':str(hash),\n                'type':str(type),\n                'c2':str(c2),\n                'fqdn':str(fqdn),\n                'ipaddr':str(ipaddr),\n                'loc':(loc).encode('utf-8'),\n                'city':(city).encode('utf-8'),\n                'region':(region).encode('utf-8'),\n                'hostname':str(hostname),\n                'country':str(country),\n                'org':str(org),\n                'postal':str(postal)\n            }\n            jlo = json.dumps(logentry)\n            logfile.write(jlo + \"\\n\")\n    elif (type == 'None') and not os.path.isfile(stored_file):\n        new_filename = rsrch_dir + base + \"_\" + hash\n        os.rename(filename, new_filename)\n    else:\n        os.remove(filename)\n" }, { "alpha_fraction": 0.5550261735916138, "alphanum_fraction": 0.5559790134429932, "avg_line_length": 45.66666793823242, "blob_id": "14c98b10259774ab64884d6f646579ac0ae2cbaf", "content_id": "54ad0c87b905d7fa759f6a0a62ec4c46a91ab07b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2099, "license_type": "permissive", "max_line_length": 120, "num_lines": 45, "path": "/bamfdetect/bamfdetect", "repo_name": 
"mmilleror/narc", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\nimport BAMF_Detect\nimport json\nfrom multiprocessing import cpu_count\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n\n parser = ArgumentParser(\n prog=__file__,\n description=\"Identifies and extracts information from bots\",\n version=\"%(prog)s v\" + BAMF_Detect.get_version() + \" by Brian Wallace (@botnet_hunter)\",\n epilog=\"%(prog)s v\" + BAMF_Detect.get_version() + \" by Brian Wallace (@botnet_hunter)\"\n )\n parser.add_argument('path', metavar='path', type=str, nargs='*', default=None,\n help=\"Paths to files or directories to scan\")\n parser.add_argument('-d', '--detect', default=False, required=False, action='store_true', help=\"Only detect files\")\n parser.add_argument('-r', '--recursive', default=False, required=False, action='store_true',\n help=\"Scan paths recursively\")\n parser.add_argument('-l', '--list', default=False, required=False, action='store_true',\n help='List available modules')\n parser.add_argument('-m', '--module', default=None, type=str, action='append', help='Modules to use, if not defined'\n 'all modules are used')\n parser.add_argument('-t', '--threads', default=cpu_count(), type=int, help='Number of threads to use')\n\n args = parser.parse_args()\n\n if args.list:\n for mod in BAMF_Detect.get_loaded_modules():\n print mod\n else:\n if args.path is None or len(args.path) == 0:\n parser.print_help()\n exit()\n for file_path, result in BAMF_Detect.async_scanning(args.path, args.detect, args.recursive, args.module,\n process_count=args.threads):\n try:\n to_print = json.dumps({file_path: result}) + \",\"\n print to_print\n except KeyboardInterrupt:\n raise\n except:\n # todo figure out a good way to log these errors\n pass" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5257623791694641, "avg_line_length": 28.261537551879883, "blob_id": "07c462ea955b0cf47cc3e73a76325ec02059c70a", "content_id": "715d1108abffa0efb603934be6726e9336840730", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1902, "license_type": "permissive", "max_line_length": 110, "num_lines": 65, "path": "/bamfdetect/BAMF_Detect/modules/njratgold.py", "repo_name": "mmilleror/narc", "src_encoding": "UTF-8", "text": "from common import Modules, data_strings_wide, load_yara_rules, PEParseModule, ModuleMetadata, is_ip_or_domain\nimport re, base64\n\nmyrex = re.compile('\\x00.\\x00e\\x00x\\x00e\\x00.?.?(.*)\\x00\\x01')\nmyc2rex = re.compile('(.*)#ic')\n\nclass Njratgold(PEParseModule):\n def __init__(self):\n md = ModuleMetadata(\n module_name=\"njratgold\",\n bot_name=\"Njratgold\",\n description=\"Njrat 0.7 Golden edition\",\n authors=[\"Paul Melson (@pmelson)\"],\n version=\"1.0\",\n date=\"January 23, 2018\",\n references=[]\n )\n PEParseModule.__init__(self, md)\n self.yara_rules = None\n\n def _generate_yara_rules(self):\n if self.yara_rules is None:\n self.yara_rules = load_yara_rules(\"njrat07g.yar\")\n return self.yara_rules\n\n @staticmethod\n def _is_number(s):\n if s != s.strip():\n return False\n try:\n if int(s) < 65536:\n return True\n return False\n except KeyboardInterrupt:\n raise\n except:\n return False\n\n @staticmethod\n def _getcfg(blob):\n cfg = bytearray()\n try:\n match = myrex.search(blob)\n elements = list(match.group(1))\n for char in elements:\n if char != '\\x00':\n cfg.append(char)\n except:\n print \"Error parsing Njrat 0.7 Golden config\"\n return cfg\n\n def get_bot_information(self, 
file_data):\n        results = {}\n        config = Njratgold._getcfg(file_data)\n        c2 = myc2rex.search(config)\n        try:\n            d = base64.b64decode(c2.group(1)).replace(\"~n\", \"s.\")\n        except:\n            d = \"nope\"\n        if is_ip_or_domain(d):\n            results['c2_uri'] = \"tcp://{0}\".format(d)\n        return results\n\nModules.list.append(Njratgold())\n" }, { "alpha_fraction": 0.5428072214126587, "alphanum_fraction": 0.5952242016792297, "avg_line_length": 36.326087951660156, "blob_id": "591ae56f4fb7ad55b1776d656f9a9956ecfae7b9", "content_id": "c5edbbab7a4e0a3be4f9be5545eec90e1d03cd2a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1717, "license_type": "permissive", "max_line_length": 180, "num_lines": 46, "path": "/bamfdetect/BAMF_Detect/modules/remcos.py", "repo_name": "mmilleror/narc", "src_encoding": "UTF-8", "text": "from common import Modules, data_strings, load_yara_rules, PEParseModule, ModuleMetadata\nfrom re import match\nfrom string import ascii_lowercase, ascii_uppercase, digits\nimport rc4\n\nclass remcos(PEParseModule):\n    def __init__(self):\n        md = ModuleMetadata(\n            module_name=\"remcos\",\n            bot_name=\"Remcos RAT\",\n            description=\"Shitty RAT\",\n            authors=[\"Paul Melson (@pmelson)\",\"Brian Wallace (@botnet_hunter)\"],\n            version=\"1.0\",\n            date=\"April 3, 2018\",\n            references=[]\n        )\n        PEParseModule.__init__(self, md)\n        self.yara_rules = None\n        pass\n\n    def _generate_yara_rules(self):\n        if self.yara_rules is None:\n            self.yara_rules = load_yara_rules(\"remcos.yar\")\n        return self.yara_rules\n\n    def get_bot_information(self, file_data):\n        results = {}\n        offset = file_data.find(b'\\x00\\x00\\x00\\x00\\xc0\\x00\\x00\\x00\\xfc\\x00\\x00\\x00\\xfe\\x00\\x00\\x01\\xfe\\x00\\x00\\x01\\xfc\\x01\\xf8\\x03\\xfc\\x01\\xfc\\x03\\xfc\\x03\\xfe\\x07\\xfe\\x03\\xff\\xff')\n        offset = offset+0x24\n        key_len = ord(file_data[offset:offset+0x01])\n        keyoffset = key_len+1\n        key = rc4.convert_key(file_data[offset+0x01:offset+keyoffset])\n        keystream = rc4.RC4(key)\n        padoffset = file_data.find(b'\\x50\\x41\\x44\\x44\\x49\\x4e\\x47\\x58\\x58')\n        encrypted_data = file_data[offset+keyoffset:padoffset]\n        decrypted = ''\n        for item in encrypted_data:\n            decrypted += chr(ord(item) ^ keystream.next())\n        urls = decrypted.split('|')[:-1]\n        results['c2s'] = []\n        for url in urls:\n            results['c2s'].append({\"c2_uri\": \"tcp://\"+url})\n        return results\n\n\nModules.list.append(remcos())\n" } ]
6
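The narc script above defangs its C2 string inline with a bare `c2.replace(".", "[.]")` before tweeting. A minimal hedged sketch of a reusable defang/refang pair (hypothetical helper names, not part of the repo), written in the Python 2 style of the source:

    def defang(ioc):
        # Make an indicator non-clickable/non-resolvable before sharing it.
        return ioc.replace("http", "hxxp").replace(".", "[.]")

    def refang(ioc):
        # Undo defanging to recover the original indicator.
        return ioc.replace("hxxp", "http").replace("[.]", ".")

    print defang("http://evil.example.com")  # -> hxxp://evil[.]example[.]com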
travispavek/nose-tags
https://github.com/travispavek/nose-tags
e4d7204e8f094f444d138d4b7f3a101b01d15637
112780eaf2754724a8478f3ba8570d44c3c8328c
2159419041674bf1e30b190164df9e4715837cc3
refs/heads/master
2021-01-11T00:06:42.515438
2016-11-10T20:53:26
2016-11-10T20:53:26
70,727,097
0
0
null
2016-10-12T18:03:44
2016-11-04T17:22:26
2016-11-10T20:53:26
Python
[ { "alpha_fraction": 0.5022600889205933, "alphanum_fraction": 0.5039419531822205, "avg_line_length": 34.22962951660156, "blob_id": "8e4ba6b74c760f376431e2f4beb7c4a6bd9b0fce", "content_id": "9affc51b738ab38d607979d8349fb5f8f875d106", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9513, "license_type": "permissive", "max_line_length": 84, "num_lines": 270, "path": "/tag/plugin.py", "repo_name": "travispavek/nose-tags", "src_encoding": "UTF-8", "text": "import logging\nimport os\nimport sys\nfrom nose.plugins.base import Plugin\nfrom nose.util import tolist\nfrom nose.plugins.collect import CollectOnly\nimport re\nimport json\nimport io\n\nlog = logging.getLogger('nose.plugins.tag')\ncompat_24 = sys.version_info >= (2, 4)\nprefix = os.environ.get('NOSE_TAG_PREFIX', 'tst')\n\ndef remove_prefix(attr):\n    return re.sub('^%s_' % prefix, '', attr)\n    \ndef add_prefix(attr):\n    return '%s_%s' % (prefix, attr)\n\ndef tag(*args, **kwargs):\n    \"\"\"Decorator that adds tags to classes or functions\n    for use with the Tag (-t) plugin.\n    \"\"\"\n    def wrap_ob(ob):\n        for name in args:\n            setattr(ob, add_prefix(name), True)\n        for name, value in kwargs.iteritems():\n            setattr(ob, add_prefix(name), value)\n        return ob\n    return wrap_ob\n\nattr = tag\n\ndef get_method_attr(method, cls, attr_name, default = False):\n    \"\"\"Look up an attribute on a method/ function.\n    If the attribute isn't found there, look it up on the\n    method's class, if any.\n    \"\"\"\n    Missing = object()\n    value = getattr(method, attr_name, Missing)\n    if value is Missing and cls is not None:\n        value = getattr(cls, attr_name, Missing)\n    if value is Missing:\n        return default\n    return value\n\n\nclass ContextHelper:\n    \"\"\"Object that can act as context dictionary for eval and looks up\n    names as attributes on a method/ function and its class. \n    \"\"\"\n    def __init__(self, method, cls):\n        self.method = method\n        self.cls = cls\n\n    def __getitem__(self, name):\n        return get_method_attr(self.method, self.cls, add_prefix(name))\n\n\nclass TagSelector(Plugin):\n    \"\"\"Selects test cases to be run based on their attributes.\n    \"\"\"\n    name = 'tag-selector'\n    \n    def __init__(self):\n        Plugin.__init__(self)\n        self.attribs = []\n\n    def options(self, parser, env):\n        \"\"\"Register command line options\"\"\"\n        parser.add_option(\"-t\", \"--tag\",\n                          dest=\"tag\", action=\"append\",\n                          default=env.get('NOSE_TAG'),\n                          metavar=\"TAG\",\n                          help=\"Run only tests that have attributes \"\n                          \"specified by TAG [NOSE_TAG]\")\n        \n        # disable in < 2.4: eval can't take needed args\n        if compat_24:\n            parser.add_option(\"-T\", \"--eval-tag\",\n                              dest=\"eval_tag\", metavar=\"TAG_EXPR\", action=\"append\",\n                              default=env.get('NOSE_EVAL_TAG'),\n                              help=\"Run only tests for whose tags \"\n                              \"the Python expression TAG_EXPR evaluates \"\n                              \"to True [NOSE_EVAL_TAG]\")\n\n    def configure(self, options, config):\n        \"\"\"Configure the plugin and system, based on selected options.\n\n        tag and eval_tag may each be lists.\n\n        self.attribs will be a list of lists of tuples. 
In that list, each\n        list is a group of attributes, all of which must match for the rule to\n        match.\n        \"\"\"\n        self.attribs = []\n\n        # handle python eval-expression parameter\n        if compat_24 and options.eval_tag:\n            eval_tag = tolist(options.eval_tag)\n            for tag in eval_tag:\n                # \"<python expression>\"\n                # -> eval(expr) in attribute context must be True\n                def eval_in_context(expr, obj, cls):\n                    return eval(expr, None, ContextHelper(obj, cls))\n                self.attribs.append([(tag, eval_in_context)])\n\n        # attribute requirements are a comma separated list of\n        # 'key=value' pairs\n        if options.tag:\n            std_tag = tolist(options.tag)\n            for tag in std_tag:\n                # all attributes within an attribute group must match\n                tag_group = []\n                for attrib in tag.strip().split(\",\"):\n                    # don't die on trailing comma\n                    if not attrib:\n                        continue\n                    items = attrib.split(\"=\", 1)\n                    if len(items) > 1:\n                        # \"name=value\"\n                        # -> 'str(obj.name) == value' must be True\n                        key, value = items\n                    else:\n                        key = items[0]\n                        if key[0] == \"!\":\n                            # \"!name\"\n                            # 'bool(obj.name)' must be False\n                            key = key[1:]\n                            value = False\n                        else:\n                            # \"name\"\n                            # -> 'bool(obj.name)' must be True\n                            value = True\n                    tag_group.append((key, value))\n                self.attribs.append(tag_group)\n        if self.attribs:\n            self.enabled = True\n\n    def validateTag(self, method, cls = None):\n        \"\"\"Verify whether a method has the required attributes.\n        The method is considered a match if it matches all attributes\n        for any attribute group.\n        \"\"\"\n        # TODO: is there a need for case-sensitive value comparison?\n        any = False\n        for group in self.attribs:\n            match = True\n            for key, value in group:\n                attr = get_method_attr(method, cls, add_prefix(key))\n                if callable(value):\n                    if not value(key, method, cls):\n                        match = False\n                        break\n                elif value is True:\n                    # value must exist and be True\n                    if not bool(attr):\n                        match = False\n                        break\n                elif value is False:\n                    # value must not exist or be False\n                    if bool(attr):\n                        match = False\n                        break\n                elif type(attr) in (list, tuple):\n                    # value must be found in the list attribute\n                    if not str(value).lower() in [str(x).lower()\n                                                  for x in attr]:\n                        match = False\n                        break\n                else:\n                    # value must match, convert to string and compare\n                    if (value != attr\n                        and str(value).lower() != str(attr).lower()):\n                        match = False\n                        break\n            any = any or match\n        if any:\n            # not True because we don't want to FORCE the selection of the\n            # item, only say that it is acceptable\n            return None\n        return False\n\n    def wantFunction(self, function):\n        \"\"\"Accept the function if its attributes match.\n        \"\"\"\n        return self.validateTag(function)\n\n    def wantMethod(self, method):\n        \"\"\"Accept the method if its attributes match.\n        \"\"\"\n        try:\n            cls = method.im_class\n        except AttributeError:\n            return False\n        return self.validateTag(method, cls)\n\n\nclass MetadataCollector(CollectOnly):\n    \"\"\"\n    Collect and output test metadata only, don't run any tests.\n    \"\"\"\n    name = \"metadata-collector\"\n    enableOpt = \"metadata_collector\"\n\n    #def configure(self, option, conf):\n    #    self.attributes = dict()\n    def __init__(self):\n        super(MetadataCollector, self).__init__()\n        self.cases = dict()\n    \n    def options(self, parser, env):\n        \"\"\"Register commandline options.\n        \"\"\"\n        parser.add_option('--get-metadata',\n                          dest=self.enableOpt,\n                          action='store_true',\n                          help=\"Enable get-metadata: %s [GET_METADATA]\" %\n                          (self.help()))\n        \n        parser.add_option('--json-file',\n                          action='store',\n                          help=\"Path to JSON file to store test metadata\")\n    \n    def configure(self, options, config):\n        if 
options.metadata_collector:\n            self.enabled = True\n            self.conf = config\n        self.outfile = options.json_file\n    \n    def startTest(self, test):\n        # location of vars is important\n        tid = test.id()\n        self.cases[tid] = dict()\n        if hasattr(test, 'address'):\n            self.cases[tid]['addr'] = test.address()\n        else:\n            self.cases[tid]['addr'] = None\n\n        if hasattr(test, 'test'):\n            test = test.test\n\n        if hasattr(test, 'description'):\n            self.cases[tid]['desc'] = test.description\n        else:\n            self.cases[tid]['desc'] = None\n        self.cases[tid]['docs'] = test.__doc__\n        self.cases[tid]['tags'] = self.get_tags(test)\n    \n    def get_tags(self, method):\n        tmp = dict()\n        is_attr = re.compile('^%s_\\S+' % prefix)\n        # Get method attributes\n        for attribute in filter(is_attr.match, dir(method)):\n            tmp[remove_prefix(attribute)] = getattr(method, attribute)\n\n        # Get class attributes\n        cls = getattr(method, 'im_class', method.__class__)\n        for attribute in filter(is_attr.match, dir(cls)):\n            tmp[remove_prefix(attribute)] = getattr(cls, attribute)\n        return tmp\n\n    def finalize(self, result):\n        if self.outfile:\n            out = open(self.outfile, 'w')\n        else:\n            out = sys.stdout\n\n        json.dump(self.cases, out, sort_keys=True, indent=4, separators=(',', ': '))\n\n" }, { "alpha_fraction": 0.8360655903816223, "alphanum_fraction": 0.8360655903816223, "avg_line_length": 60, "blob_id": "346caf0fc152b22115fa4a63fdc610de6059a594", "content_id": "1be4237f73aa3c0c98046f29afd796d6738744c3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 61, "license_type": "permissive", "max_line_length": 60, "num_lines": 1, "path": "/tag/__init__.py", "repo_name": "travispavek/nose-tags", "src_encoding": "UTF-8", "text": "from plugin import MetadataCollector, TagSelector, attr, tag\n" }, { "alpha_fraction": 0.7996820211410522, "alphanum_fraction": 0.7996820211410522, "avg_line_length": 51.5, "blob_id": "cf334ca69ae030fab15dc06dcaaa9ed07be7382a", "content_id": "7b957e1188f3aadd7230c68a27767ca8772f7767", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 629, "license_type": "permissive", "max_line_length": 244, "num_lines": 12, "path": "/README.md", "repo_name": "travispavek/nose-tags", "src_encoding": "UTF-8", "text": "# nose-tag\n\nnose-tag plugin enhances the existing built-in attrib plugin.\n\nEnhancements:\n\nAttribute Prefix - Attributes added using the attr decorator should be prefixed with a unique string to make finding all nose attributes easier and to minimize the likelihood of name collisions.\n\nCollect Attributes - Because we now know all attributes added by nose we can ask the question what are all the attributes a method has. Option --attr-collect will print all selected tests and the attributes for that test in JSON form to stdout.\n\nBetter Selection - Improve the evaluation of expression parameters.\n - Not yet implemented" } ]
3
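A short usage sketch for the nose-tags plugin above, assuming the package is installed and the plugin registered with nose; the test function and tag names are illustrative:

    from tag import tag

    @tag('smoke', priority=1)   # sets tst_smoke=True and tst_priority=1 (default NOSE_TAG_PREFIX is 'tst')
    def test_login():
        assert True

    # Illustrative selections on the command line:
    #   nosetests -t smoke                        # tests where bool(tst_smoke) is True
    #   nosetests -t priority=1                   # string-compared key=value match
    #   nosetests -T "smoke and priority == 1"    # Python-expression form (-T/--eval-tag)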
krkc64/open_cv_
https://github.com/krkc64/open_cv_
7d8bdd14172ca8db6cf97ea19db48016350529fc
47d96170b4fe32af43beab06af13f1b06de84556
ce59e706cc464827711a2ffe4cbeff4c9835d7c0
refs/heads/master
2022-12-17T08:09:20.439866
2020-09-15T06:44:29
2020-09-15T06:44:29
295,637,787
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.62129145860672, "alphanum_fraction": 0.6527050733566284, "avg_line_length": 32.70588302612305, "blob_id": "2219f05354480b7382c51517c090c9c4df823043", "content_id": "902f3f9c5a1f56063c57d34d6b81f4889e850428", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/Extracting_face_from_image.py", "repo_name": "krkc64/open_cv_", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport cv2\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nim = cv2.imread(\"image.jpg\")\n#im = Image.open(\"\"+dirname+\"/\"+subdirname+\"/\"+filename)\n#gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\nfaces = face_cascade.detectMultiScale(im, 1.3, 5)\ni = 0\nfor (x,y,w,h) in faces:\n\timg = cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)\n\t#roi_gray = gray[y:y+h, x:x+w]\n\troi_color = img[y:y+h, x:x+w]\n\t# number each detected face so multiple faces do not overwrite the same file\n\tname = './' + str(i) + '.jpg'\n\tprint ('Creating...' + name)\n\tcv2.imwrite(name, roi_color)\n\ti += 1\n" }, { "alpha_fraction": 0.5578764081001282, "alphanum_fraction": 0.5892080068588257, "avg_line_length": 24.53333282470703, "blob_id": "c206f72d70e5b7a7c2c6fef3f68b8b39a6f59f39", "content_id": "f85bf8b5cde0825ddeba0182bd3e9ce4a3a36686", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1149, "license_type": "no_license", "max_line_length": 75, "num_lines": 45, "path": "/crop_face_from_images.py", "repo_name": "krkc64/open_cv_", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport cv2\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\npath = \"/home/snake/Desktop/ca\"\n\ni = 0\n\nc = 0\nlabel=np.ones((20000,),dtype = int)\n\np = 0\n\nfor dirname, dirnames, filenames in os.walk(path):\n    for subdirname in dirnames:\n        subject_path = os.path.join(dirname, subdirname)\n        for filename in os.listdir(subject_path):\n            # read each image from the directory being walked, not a hardcoded folder\n            im = cv2.imread(os.path.join(subject_path, filename))\n            #im = Image.open(\"\"+dirname+\"/\"+subdirname+\"/\"+filename)\n            #gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n            faces = face_cascade.detectMultiScale(im, 1.3, 5)\n            for (x,y,w,h) in faces:\n                img = cv2.rectangle(im,(x,y),(x+w,y+h),(255,0,0),2)\n                #roi_gray = gray[y:y+h, x:x+w]\n                roi_color = img[y:y+h, x:x+w]\n                name = './input_r3/' + str(i) + '.jpg'\n                print ('Creating...' + name)\n                cv2.imwrite(name, roi_color)\n                i +=1\n\n            #img = im.resize((200,200))\n            #gray = img.convert('L')\n            #gray.save(\"\"+'input_r'+\"/\"+filename, \"JPEG\")\n            #c +=1\n            #label[p] = i\n            #p +=1\n\n\n\nprint('no of files are', i)\n" } ]
2
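In both scripts above, `detectMultiScale(im, 1.3, 5)` passes the scale factor (image-pyramid step) and minNeighbors (how many overlapping detections are needed to keep a box) positionally. A small hedged sketch of the same call with keyword arguments, assuming the Haar cascade XML sits next to the script:

    import cv2

    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    img = cv2.imread('image.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # detection is usually run on grayscale
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
    for i, (x, y, w, h) in enumerate(faces):
        # crop and save each face under its own index
        cv2.imwrite('%d.jpg' % i, img[y:y+h, x:x+w])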
Sshanu/GoogleApps
https://github.com/Sshanu/GoogleApps
502511a48e1a54192dc401d7270bc688fc78b0ca
a764a0caf619ee9169836464c80e96daf5781f67
232f9b38fbc2e9f47470d5d600a7fa4eeed38966
refs/heads/master
2017-11-25T00:00:06.290890
2016-05-31T21:33:03
2016-05-31T21:33:03
60,124,852
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 23, "blob_id": "4059b6982763bc9b44b38a91daa805d488939547", "content_id": "aa74a48da2fc46e85c48894d7d3a7d9d9b675012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 48, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", "repo_name": "Sshanu/GoogleApps", "src_encoding": "UTF-8", "text": "# GoogleApps\nUse AppEngine to host local server\n" }, { "alpha_fraction": 0.6575157642364502, "alphanum_fraction": 0.6683168411254883, "avg_line_length": 29.86111068725586, "blob_id": "36054e2e2f99297aec0f1694b0c60637dd610c63", "content_id": "f7a6d3d794cbbe77c38d15d6242bd0d0b08945a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2222, "license_type": "no_license", "max_line_length": 119, "num_lines": 72, "path": "/hello/main.py", "repo_name": "Sshanu/GoogleApps", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nform=\"\"\"\n<form method=\"post\">\n\tWhat is your birthday?\n\t<br>\n\t<label>Month\n\t\t<input type=\"text\" name=\"month\" value=\"%(month)s\">\n\t</label>\n\t<label>Day\n\t\t<input type=\"text\" name=\"day\" value=\"%(day)s\">\n\t</label>\n\t<label>Year\n\t\t<input type=\"text\" name=\"year\" value=\"%(year)s\">\n\t</label>\n\t<div style=\"color :red\">%(error)s</div>\n\t<br>\n\t<br>\n\t<input type=\"submit\" >\n</form>\n\"\"\"\nmonths =['January','February','March','April','May','June','July','August','September','October','November','December']\nmonth_abbvs = dict((m[:3].lower(),m) for m in months)\ndef valid_month(month):\n\tif month:\n\t\tshort_month = month[:3].lower()\n\t\treturn month_abbvs.get(short_month)\ndef valid_day(day):\n\tif day and day.isdigit():\n\t\tday = int(day)\n\t\tif day > 0 and day <= 31:\n\t\t\treturn day\ndef valid_year(year):\n\tif year and year.isdigit():\n\t\tyear = int(year)\n\t\tif year > 1990 and year < 2020:\n\t\t\treturn year \ndef write_form(self,error=\"\",month=\"\", year=\"\",day=\"\"):\n\t\tself.response.out.write(form % {\"error\":error, \"month\":month, \"year\":year, \"day\":day})\nclass MainHandler(webapp2.RequestHandler):\n    def get(self):\n        write_form(self)\n    def post(self):\n    \tuser_month = self.request.get('month')\n    \tuser_day = self.request.get('day')\n    \tuser_year = self.request.get('year')\n    \tmonth = valid_month(user_month)\n    \tyear = valid_year(user_year)\n    \tday = valid_day(user_day)\n    \tif not (month and year and day):\n    \t\twrite_form(self,\"Not a valid input\",user_month,user_year,user_day)\n    \telse:\n\t\tself.response.out.write(\"Thanks! That's valid input\")\n\napp = webapp2.WSGIApplication([\n    ('/', MainHandler)\n], debug=True)\n" } ]
2
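The `month_abbvs` lookup in hello/main.py keys each month by its first three lowercased letters, so any input that starts with a valid abbreviation resolves to the full month name. A quick isolated sketch of that behavior, using the same expressions as the source:

    months = ['January','February','March','April','May','June','July','August','September','October','November','December']
    month_abbvs = dict((m[:3].lower(), m) for m in months)

    def valid_month(month):
        if month:
            return month_abbvs.get(month[:3].lower())

    print valid_month('jAnUary')   # -> 'January'
    print valid_month('xyz')       # -> None (falsy, so validation fails)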
clairedinauer/blam-access-data
https://github.com/clairedinauer/blam-access-data
00d60409703bd08029523d73c9ed276a946efbb0
72a64f07ac6aed4e4a1b5c6450ded9ba245f8842
e4761d5bdc7905f4c2900ceb75f27ed6c8abd15d
refs/heads/master
2020-07-05T19:20:51.406643
2019-08-30T18:56:40
2019-08-30T18:56:40
202,745,721
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5227981805801392, "alphanum_fraction": 0.5279720425605774, "avg_line_length": 14.052631378173828, "blob_id": "758a574cea70c0d769d362aa88c3568c2ba8c9e5", "content_id": "8489fc1807795013100b0f86a2f3cf565800d013", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 286, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/predCode/predCode_access_db.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "'''\nThis is a list of the Access Database checklist forms\n'''\n\nforms = [\n    'Medical History',\n    'ConCom Meds',\n    'NonPychCC Meds',\n    'NUQ',\n    'DDF5',\n    'SCID5 Summary',\n    'MATRICS',\n    'Hollow Mask Illusion',\n    'BPRS',\n    'CHAT',\n    'CAINS',\n    'LOF',\n    'WAMHI'\n]\n" }, { "alpha_fraction": 0.5956543684005737, "alphanum_fraction": 0.5997284054756165, "avg_line_length": 33.45029067993164, "blob_id": "657a31601348049e0b398465d35ad9d9a0d7f1d9", "content_id": "e79a908088c2470d58e979951c55e8b7e80e1ba6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5891, "license_type": "no_license", "max_line_length": 135, "num_lines": 171, "path": "/predCode/predictive_coding_tracking_functions.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "'''\nThis script allows the user to select, view, add, and remove subjects from the Predictive Coding study's participant tracking database.\n\nThe layout is in functions.\n'''\n\nimport pandas as pd\nimport predCode_participant_list  # import participant list (subject IDs)\nimport predCode_table_of_events  # import table of events list\nimport predCode_access_db  # import access database list\nfrom time import sleep as s  # allows for pauses in display time\n\n# Link to the predCode_table_of_events.xlsx Excel sheet\n# This sheet covers the \"Table of Events\" data entry checklist\ndf = pd.read_excel(r'excelData/predCode_table_of_events.xlsx', sheet_name='TABLE OF EVENTS')\n\n\ndef select():\n    print(\"Welcome to the Predictive Coding Study Participant Tracking Database.\\n\")\n    s(1)\n    print(\"1. Lookup specific participant ID.\")\n    print(\"2. List all missing data from all subjects.\")\n    print(\"3. Add new subject to database.\")\n    print(\"4. Remove subject from database.\")\n    print(\"5. Bye!\\n\")\n    global selection\n    selection = int(input(\"What would you like to do? \"))\n    s(1)\n\n\ndef lookupTOE():\n    global beliefid\n    beliefid = str.upper((input('Please enter BeliefID: ')))\n    if beliefid in predCode_participant_list.belieflist:\n        print(\"\\nBeliefID found. Here is what I have on file for \" + beliefid + \":\")\n\n        # this selects all rows where the participant id is equal to the user input\n        df_temp = df[df['MPRCID'] == beliefid]\n\n        # this resets the index to regular, so we can just use iloc(0)\n        df_temp = df_temp.reset_index()\n\n        for i in predCode_table_of_events.toeForms:\n            if df_temp.loc[0, i] == 'no':\n                print(i + ' missing')\n\n        print('NOTES:')\n        print(df.loc[df.MPRCID == beliefid, 'NOTES'])\n\n    else:\n        print(\"Sorry, I do not have \" + beliefid + \" on file.\")\n\n\ndef lookupAccess():\n    s(1)\n    access = str(input(\"\\nWould you also like to see what is missing for \" +\n                       beliefid + \" in Access? 
(y/n) \"))\n    if access == 'y':\n        df = pd.read_excel(r'excelData/predCode_table_of_events.xlsx', sheet_name='ACCESS')\n        if beliefid in predCode_participant_list.belieflist:\n            print(\"\\n\" + beliefid + \" is missing the following forms in Access:\")\n\n            # this selects all rows where the participant id is equal to the user input\n            df_temp = df[df['MPRCID'] == beliefid]\n\n            # this resets the index to regular, so we can just use iloc(0)\n            df_temp = df_temp.reset_index()\n\n            for i in predCode_access_db.forms:\n                if df_temp.loc[0, i] == 'no':\n                    print(i + ' missing')\n        else:\n            print(\"Sorry, I do not have \" + beliefid + \" on file.\")\n\n    else:\n        exit()\n\n\ndef listallTOE():\n    for beliefid in predCode_participant_list.belieflist:\n        print(\"\\nParticipant \" + beliefid + \" is missing:\")\n\n        # this selects all rows where the participant id is equal to the user input\n        df_temp = df[df['MPRCID'] == beliefid]\n\n        # this resets the index to regular, so we can just use iloc(0)\n        df_temp = df_temp.reset_index()\n\n        for i in predCode_table_of_events.toeForms:\n            if df_temp.loc[0, i] == 'no':\n                print('    ' + i)\n\n        print('NOTES:')\n        print(df.loc[df.MPRCID == beliefid, 'NOTES'])\n\n\ndef listallAccess():\n    s(1)\n    access = str(input(\"\\nWould you like to list who is missing which forms in Access? (y/n) \"))\n    if access == 'y':\n        df = pd.read_excel(r'excelData/predCode_table_of_events.xlsx', sheet_name='ACCESS')\n\n        for beliefid in predCode_participant_list.belieflist:\n            print(\"\\nParticipant \" + beliefid + \" is missing:\")\n\n            # this selects all rows where the participant id is equal to the user input\n            df_temp = df[df['MPRCID'] == beliefid]\n\n            # this resets the index to regular, so we can just use iloc(0)\n            df_temp = df_temp.reset_index()\n\n            for i in predCode_access_db.forms:\n                if df_temp.loc[0, i] == 'no':\n                    print(i)\n    else:\n        exit()\n\n\ndef add():  # allows the user to add a new BeliefID\n    beliefid = str.upper((input('Please enter new BeliefID: ')))\n\n    # This checks if the BeliefID is already in the list:\n    with open('list.txt') as f:\n        if beliefid in f.read():\n            print(\"This BeliefID is already taken.\")\n            return  # Insert \"return\" to avoid adding a beliefid that already exists\n\n    # This adds the new beliefid to the text file using append through 'a' and 'writelines':\n    with open('list.txt', 'a') as filehandle:\n        filehandle.writelines(\"'\" + beliefid + \"',\\n\")\n    print(\"BeliefID successfully added. Remember to save the 'list.txt' file.\")\n    return\n\n\ndef remove():\n    beliefid = str.upper((input('Please enter BeliefID: ')))\n\n    # This checks if the BeliefID is not in the list:\n    with open('list.txt') as f:\n        if beliefid not in f.read():\n            print(\"This BeliefID is not in the list, and therefore cannot be removed.\")\n            return\n\n    # If the BeliefID is in the list, it will be deleted:\n    with open('list.txt', 'r+') as f:\n        t = f.read()\n        # match the \"'ID',\" line format that add() writes to list.txt\n        beliefid_delete = \"'\" + beliefid.strip() + \"',\"\n        f.seek(0)\n        for line in t.split('\\n'):\n            if line != beliefid_delete:\n                f.write(line + '\\n')\n        f.truncate()\n    print(\"BeliefID successfully deleted. 
Remember to save the 'list.txt' file.\")\n\n\nif __name__ == \"__main__\":\n select()\n if selection == 1:\n lookupTOE()\n lookupAccess()\n elif selection == 2:\n listallTOE()\n listallAccess()\n elif selection == 3:\n add()\n elif selection == 4:\n remove()\n elif selection == 5:\n print(\"Until next time, my friend :)\")\n s(2)\n exit()\n" }, { "alpha_fraction": 0.691919207572937, "alphanum_fraction": 0.6939393877983093, "avg_line_length": 35.66666793823242, "blob_id": "bcb9b1cfe842b265715d753c5d66f62465c30a33", "content_id": "ea82395cb4276381f1465361b03eb889e34773eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 990, "license_type": "no_license", "max_line_length": 90, "num_lines": 27, "path": "/oldToeAccessFunctions/accesscheck.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport predCode # predictive coding participant list\nimport predCode_access_db # predictive coding access checklist\n\n# Link to the practice_accesscheck.xlsx Excel sheet\n# This sheet covers the \"Access\" data entry checklist\n\nprint(\"This system will indicate if a participant's forms have been entered in Access.\\n\")\ndf = pd.read_excel(r'practice_accesscheck.xlsx', sheet_name='ACCESS')\n\nbeliefid = str.upper((input('Please enter BeliefID: ')))\n\nif beliefid in predCode.belieflist:\n print(\"\\nBeliefID found. Here is what I have on file for \" + beliefid + \":\")\n\n # this selects all rows where the participant id is equal to the user input\n df_temp = df[df['MPRCID'] == beliefid]\n\n # this resets the index to regular, so we can just use iloc(0)\n df_temp = df_temp.reset_index()\n\n for i in predCode_access_db.forms:\n if df_temp.loc[0, i] == 'no':\n print(i + ' missing')\n\nelse:\n print(\"Sorry, I do not have \" + beliefid + \" on file.\")\n" }, { "alpha_fraction": 0.48159998655319214, "alphanum_fraction": 0.4896000027656555, "avg_line_length": 14.625, "blob_id": "33579f037000a041824049411b423da65078e3a9", "content_id": "8abdecad33f16bc3b24f8694cf9d2d35df65c7fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 625, "license_type": "no_license", "max_line_length": 44, "num_lines": 40, "path": "/predCode/predCode_table_of_events.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "'''\nThis is a list of the Table of Events forms.\n'''\n\ntoeForms = [\n 'Consent Form',\n 'Doc to Consent',\n 'DEMO 1',\n 'DEMO 2',\n 'Medical History',\n 'ConCom Meds',\n 'NonPychCC Meds',\n 'NUQ',\n 'Urine Tox',\n 'MRI Safety',\n 'DDF5',\n 'SCID5',\n 'SCID5 Summary',\n 'WTAR',\n 'MATRICS',\n 'Hollow Mask Illusion',\n 'PDI + LSHS',\n 'Conditioned Hallucinations',\n 'Food Task',\n 'BPRS',\n 'CHAT',\n 'CAINS',\n 'LOF',\n 'WAMHI',\n 'MRS',\n 'Reversal Learning',\n 'ASI',\n 'GCB',\n 'PANAS',\n 'Motion Change',\n 'MMN',\n 'LTP',\n 'Agency Task',\n 'NOTES'\n]\n" }, { "alpha_fraction": 0.6887477040290833, "alphanum_fraction": 0.6905626058578491, "avg_line_length": 34.54838562011719, "blob_id": "49c563e7cb1534b3f0c800ed05a0aa87aa738c16", "content_id": "b43c59d6537e5699484cdad54411e4bd9d40a8c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 80, "num_lines": 31, "path": "/oldToeAccessFunctions/tableEvents.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "import pandas as 
pd\nimport predCode_participant_list  # predictive coding participant list\nimport predCode_table_of_events  # predictive coding table of events list\n\n# Link to the practice_accesscheck.xlsx Excel sheet\n# This sheet covers the \"Access\" data entry checklist\n\nprint(\"This system will pull up a participant's Table of Events.\\n\")\ndf = pd.read_excel(r'practice_accesscheck.xlsx', sheet_name='TABLE OF EVENTS')\n\nbeliefid = str.upper((input('Please enter BeliefID: ')))\n\n\nif beliefid in predCode_participant_list.belieflist:\n    print(\"\\nBeliefID found. Here is what I have on file for \" + beliefid + \":\")\n\n    # this selects all rows where the participant id is equal to the user input\n    df_temp = df[df['MPRCID'] == beliefid]\n\n    # this resets the index to regular, so we can just use iloc(0)\n    df_temp = df_temp.reset_index()\n\n    for i in predCode_table_of_events.toeForms:\n        if df_temp.loc[0, i] == 'no':\n            print(i + ' missing')\n\n    print('NOTES:')\n    print(df.loc[df.MPRCID == beliefid, 'NOTES'])\n\nelse:\n    print(\"Sorry, I do not have \" + beliefid + \" on file.\")\n" }, { "alpha_fraction": 0.5810810923576355, "alphanum_fraction": 0.5810810923576355, "avg_line_length": 8.25, "blob_id": "f8b4081ce0e794a877bd328aa6bbef40cbb3f8d1", "content_id": "6ca3b841053fbffe91b1d3b1707127fa7e083dee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 27, "num_lines": 8, "path": "/test.practice/uppertest.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "a = \"m\"\nprint(a)\nprint(a.upper())\n\n\nb = input(\"type your name\")\n\nprint(b)\n" }, { "alpha_fraction": 0.30756014585494995, "alphanum_fraction": 0.6219931244850159, "avg_line_length": 16.37313461303711, "blob_id": "b8abd0f634b403837e8763a7b6c20f629d5d85d1", "content_id": "aef9f63de6a5b65367f7b1de7478f181746b9d49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1164, "license_type": "no_license", "max_line_length": 76, "num_lines": 67, "path": "/test.practice/checklist.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "BeliefID = str(input(\"Please enter BeliefID: \"))\nbelieflist = [\"000001\", \"000002\", \"000003\", \"000004\", \"000005\"]\n\nif BeliefID in belieflist:\n    print (\"Great, let me pull up data for participant \" + BeliefID +\".\")\n\n    if BeliefID in (\"000001\", \"000002\", \"000003\", \"000004\", \"000005\"):\n        checklist = str(input(\"Which form do you want to confirm? 
\"))\n        if checklist == \"Medical History\" and BeliefID == \"000001\":\n            print(\"This form is completed.\")\n        else:\n            print(\"This form is missing.\")\n\nelse:\n    print(\"Sorry, I do not have \" + BeliefID + \" on file.\")\n\nbeliefid = ['M042300',\n'M042311',\n'M042322',\n'M042333',\n'M042344',\n'M042355',\n'M042366',\n'M042377',\n'M042388',\n'M042390',\n'M042401',\n'M042412',\n'M042423',\n'M042434',\n'M042445',\n'M042456',\n'M042467',\n'M042478',\n'M042480',\n'M042491',\n'M042502',\n'M042513',\n'M042524',\n'M042535',\n'M042546',\n'M042557',\n'M042568',\n'M042570',\n'M042581',\n'M042592',\n'M042603',\n'M042614',\n'M042625',\n'M042636',\n'M042647',\n'M042658',\n'M042660',\n'M042671',\n'M042682',\n'M042693',\n'M042704',\n'M042715',\n'M042726',\n'M042737',\n'M042748',\n'M042750',\n'M042761',\n'M042772',\n'M042783',\n'M042794'\n]\n" }, { "alpha_fraction": 0.5784000158309937, "alphanum_fraction": 0.5983999967575073, "avg_line_length": 25, "blob_id": "2346e3cdba733eab6088394a1fff7a28406a4e14", "content_id": "482333456ae81a281263a1a4cfd1e19f45c65cba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1250, "license_type": "no_license", "max_line_length": 70, "num_lines": 50, "path": "/test.practice/participant-tracking.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "import predCode\n\nclass BeliefID: # this is a class object\n\n    def __init__(self, id, first, last, diagnosis): # init method\n        self.id = id # instance variables, aka attributes\n        self.first = first # instance variables, aka attributes\n        self.last = last # take instance as 1st argument SELF.etc\n        self._diagnosis = diagnosis # stored under _diagnosis so the property below can read it\n        # self.dob = dob\n        # self.handedness = handedness\n        # self.gender = gender\n        # self.phone = phone\n        # self.voicemail = voicemail\n        # self.text = text\n        # self.email = email\n        # self.diagnosis = diagnosis\n        # self.onset = onset\n        # self.drugs = drugs\n        # self.screendate = screendate\n\n    def fullname(self): # this is a method\n        return '{} {}'.format(self.first, self.last)\n\n    @property\n    def diagnosis(self):\n        return '{}'.format(self._diagnosis)\n\n    @diagnosis.setter\n    def diagnosis(self, diagnosis):\n        self._diagnosis = diagnosis\n\n# class PredictiveCoding(BeliefID):\n#\n#     def __init__(self):\n#\n#         super().__init__(id, first, last)\n#         self.predcodeid = predcodeid\n\n\n\n\n\n\n\nM042300 = BeliefID('000001', 'None', 'None', 'None')\n\nM042300.diagnosis = \"Psychic\"\n\n# print(M042300.fullname())\n" }, { "alpha_fraction": 0.7519110441207886, "alphanum_fraction": 0.7526059746742249, "avg_line_length": 94.93333435058594, "blob_id": "77120b83d289095e009d1564ac3b638df3be2508", "content_id": "6e0675edcb446c0a756a96cac0ec60b187e5248a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1439, "license_type": "no_license", "max_line_length": 258, "num_lines": 15, "path": "/README.md", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "# blam-access-data\n excelData Folder:\n -- Includes the record of participants' completion of Table of Events\n -- Includes \"practice_accesscheck.xlsx\" which has the record of both \"ACCESS\" and \"TABLE OF EVENTS\" completion\n\n oldToeAccessFunctions Folder:\n -- Includes the previous Python files used to either look up a participant's completion of Table of Events or Access Data entry ('accesscheck.py' and 'tableEvents.py'), or list all participants' completion ('accesscheck_ALL.py' and 'tableEvents_ALL.py')\n\n 
predCode Folder:\n -- Includes the code focused on carrying out the previously listed tasks. The user may choose which task they would like to carry out. This is in \"predictive_coding_tracking.py\".\n -- Includes \"lists\" folder, which holds the list of participants, access forms, and table of events forms. This folder also has the .txt file lists for appending participants, as well as practice_checklist.xlsx.\n\n Notes to Self for Future Improvements:\n -- The next thing I would like to tackle, specifically in predictive_coding_tracking_classes.py, is to create a loop for the add() and remove() functions if the user wishes to add or remove multiple Belief IDs.\n -- I would also like to create a loop for the numbered menu that users will return to after completing their desired function. In other words, the program will re-run the menu after each task until the user enters \"5\" to exit.\n" }, { "alpha_fraction": 0.7454545497894287, "alphanum_fraction": 0.7636363506317139, "avg_line_length": 22.571428298950195, "blob_id": "7580d0846d1b744896069918be5888b89ed93c89", "content_id": "cca96c29a4212835bf4ec69360fbe9bc08d89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 165, "license_type": "no_license", "max_line_length": 75, "num_lines": 7, "path": "/HollowMaskIllusion/HMI.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom collections import Counter\n\ndf_HMI = pd.read_excel('HollowMaskExcel.xlsx', sheet_name='HMI_Simplified')\n\n\nprint(dict(Counter(df_HMI.T012)))\n" }, { "alpha_fraction": 0.6998972296714783, "alphanum_fraction": 0.7029804587364197, "avg_line_length": 33.75, "blob_id": "fbe8934e4bbdaac64e612e75e1817cbd72674b7a", "content_id": "ac67bc817f66b9eeb0d9804819e66ef877904245", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 973, "license_type": "no_license", "max_line_length": 79, "num_lines": 28, "path": "/oldToeAccessFunctions/tableEvents_ALL.py", "repo_name": "clairedinauer/blam-access-data", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport predCode_participant_list  # predictive coding participant list\nimport predCode_table_of_events  # predictive coding table of events list\nfrom time import sleep as s\n\n# Link to the practice_accesscheck.xlsx Excel sheet\n# This sheet covers the \"Access\" data entry checklist\n\nprint(\"This system will pull up a participant's Table of Events.\\n\")\ns(1)\n\ndf = pd.read_excel(r'practice_accesscheck.xlsx', sheet_name='TABLE OF EVENTS')\n\nfor beliefid in predCode_participant_list.belieflist:\n    print(\"\\nParticipant \" + beliefid + \" is missing:\")\n\n    # this selects all rows where the participant id is equal to the user input\n    df_temp = df[df['MPRCID'] == beliefid]\n\n    # this resets the index to regular, so we can just use iloc(0)\n    df_temp = df_temp.reset_index()\n\n    for i in predCode_table_of_events.toeForms:\n        if df_temp.loc[0, i] == 'no':\n            print(i)\n\n    print('NOTES:')\n    print(df.loc[df.MPRCID == beliefid, 'NOTES'])\n" } ]
11
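The README's "Notes to Self" above ask for a loop that re-runs the numbered menu after each task until the user enters 5. A hedged sketch of that change to the `__main__` block of predictive_coding_tracking_functions.py (illustrative, not the repo's code):

    if __name__ == "__main__":
        while True:
            select()                 # prints the menu and sets the global `selection`
            if selection == 1:
                lookupTOE()
                lookupAccess()
            elif selection == 2:
                listallTOE()
                listallAccess()
            elif selection == 3:
                add()
            elif selection == 4:
                remove()
            elif selection == 5:
                print("Until next time, my friend :)")
                break                # leave the loop instead of calling exit()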
jnammu21/Python
https://github.com/jnammu21/Python
d2316351e5f9692826082ab95080c048c6383c0f
f67a8b95a41dc0229a4919a29009357df9b0ad12
6046b0cf7f0127844a0be23d0c738cff2c2eabc4
refs/heads/master
2020-03-25T22:16:10.039657
2018-08-09T23:33:10
2018-08-09T23:33:10
144,213,533
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.44341281056404114, "alphanum_fraction": 0.4614805579185486, "avg_line_length": 35.227272033691406, "blob_id": "9ed3f10e4afa6d302885a0e2c9b5fa3d63eb87de", "content_id": "2f5e15f25d7310d86f9f0e7d1ddf20d98a2e2ec0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3985, "license_type": "no_license", "max_line_length": 119, "num_lines": 110, "path": "/Vending_Machine.py", "repo_name": "jnammu21/Python", "src_encoding": "UTF-8", "text": "print(\"Welcome to the UB Vending Machine\")\nprint(\"Enter the number of nickels you wish to insert: \")\nnumber_of_nickels = input()\nin_dollar = float(number_of_nickels)*0.05\nrdollar = round(in_dollar,2)\nbalance = float(rdollar)\nprint(\"you inserted \" + str(rdollar) + \" dollars\")\ndef main_menu():\n    print(\"-----------------------------------\\n\"\n          \"Main menu:\\n\"\n          \"-----------------------------------\\n\"\n          \"[1] Drinks\\n\"\n          \"[2] Snacks\\n\"\n          \"[3] Exit\\n\"\n          \"Select an option <3 to exit>: \")\ndef drinks_menu():\n    global balance\n    while True:\n        print(\"-----------------------------------\\n\"\n              \"Drinks menu:\\n\"\n              \"-----------------------------------\\n\"\n              \"Water $0.75\\n\"\n              \"Juice $0.99\\n\"\n              \"Soda $1.39\\n\"\n              \"Select a drink by entering the full name <x to exit to the main menu>\\n\")\n\n        choice = str(input(\"Drink option: \"))\n        if choice == \"Water\":\n            # check funds before deducting so the balance can never go negative\n            if balance >= 0.75:\n                balance = round(balance - 0.75, 2)\n                print(\"Vending Water, you have\", str(balance), \"dollars left\")\n            else:\n                print(\"you don't have enough money to buy Water <\", str(balance), \"<\", str(0.75), \">\")\n        elif choice == \"Juice\":\n            if balance >= 0.99:\n                balance = round(balance - 0.99, 2)\n                print(\"Vending Juice, you have\", str(balance), \"dollars left\")\n            else:\n                print(\"you don't have enough money to buy Juice <\", str(balance), \"<\", str(0.99), \">\")\n        elif choice == \"Soda\":\n            if balance >= 1.39:\n                balance = round(balance - 1.39, 2)\n                print(\"Vending Soda, you have\", str(balance), \"dollars left\")\n            else:\n                print(\"you don't have enough money to buy Soda <\", str(balance), \"<\", str(1.39), \">\")\n        elif choice == \"x\":\n            return main_menu()\n        else:\n            print(\"invalid option!\")\n\ndef snacks_menu():\n    global balance\n    while True:\n        print(\"-----------------------------------\\n\"\n              \"Snacks menu:\\n\"\n              \"-----------------------------------\\n\"\n              \"Chips $0.99\\n\"\n              \"Peanuts $0.50\\n\"\n              \"Gum $0.35\\n\"\n              \"Select a snack by entering the full name <x to exit to the main menu>\\n\")\n\n        choice = str(input(\"Snack option: \"))\n        if choice == \"Chips\":\n            if balance >= 0.99:\n                balance = round(balance - 0.99, 2)\n                print(\"Vending Chips, you have\", str(balance), \"dollars left\")\n            else:\n                print(\"you don't have enough money to buy Chips <\", str(balance), \"<\", str(0.99), \">\")\n        elif choice == \"Peanuts\":\n            if balance >= 0.5:\n                balance = round(balance - 0.5, 2)\n                print(\"Vending Peanuts, you have\", str(balance), \"dollars left\")\n            else:\n                print(\"you don't have enough money to buy Peanuts <\", str(balance), \"<\", str(0.5), \">\")\n        elif choice == \"Gum\":\n            if balance >= 0.35:\n                balance = round(balance - 0.35, 2)\n                print(\"Vending Gum, you have\", str(balance), \"dollars left\")\n            else:\n                print(\"you don't have enough money to buy Gum <\", str(balance), \"<\", str(0.35), \">\")\n        elif choice == \"x\":\n            return main_menu()\n        else:\n            print(\"invalid 
option!\")\n\nwhile True:\n\n main_menu()\n choice = input()\n choice = int(choice)\n if choice == 1:\n drinks_menu()\n elif choice == 2:\n snacks_menu()\n elif choice == 3:\n print(\"-------------------------------\\n\"\n \"Inserted amount: \", str(rdollar),\",\", \"total purchase: \", rdollar-balance, \",\", \"change: \", balance)\n exit()\n else:\n print(\"invalid option!\")\n" } ]
1
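Each branch of Vending_Machine.py repeats the same check-funds/deduct/report pattern per product. A hedged sketch of a shared helper (hypothetical name `vend`) that both menus could call instead of duplicating the logic:

    def vend(item, price):
        global balance
        if balance >= price:
            # deduct only when funds are sufficient, rounding to cents
            balance = round(balance - price, 2)
            print("Vending " + item + ", you have", balance, "dollars left")
        else:
            print("you don't have enough money to buy " + item, "<", balance, "<", price, ">")

    # e.g. inside drinks_menu():  if choice == "Water": vend("Water", 0.75)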
darkfower/ops-manager
https://github.com/darkfower/ops-manager
4ef108bba414473c41c528b1f12283ce259076db
4a389b1aaeb039f4ae418c03cc0d0fe2e5f92e84
7b4272c2f8db17c9be98aaac754a6db81edd104b
refs/heads/master
2016-09-06T04:54:50.670975
2012-06-06T06:37:23
2012-06-06T06:37:23
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5227981805801392, "alphanum_fraction": 0.530789852142334, "avg_line_length": 36.98128890991211, "blob_id": "ab575016dd4042a8f80e964a729c751e2ecaee19", "content_id": "9573adfc4b9781e2662e5ab7552c8d31ae2a8513", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18269, "license_type": "no_license", "max_line_length": 148, "num_lines": 481, "path": "/webadmin/pki/openssl.py", "repo_name": "darkfower/ops-manager", "src_encoding": "UTF-8", "text": "import os\nimport re\nimport string\nimport random\n\nfrom subprocess import Popen, PIPE, STDOUT\nfrom shutil import rmtree\nfrom logging import getLogger\n\nfrom django.template.loader import render_to_string\n\nimport pki.models\nfrom pki.helper import subject_for_object\nfrom pki.settings import PKI_OPENSSL_BIN, PKI_OPENSSL_CONF, PKI_DIR, PKI_OPENSSL_TEMPLATE, \\\n PKI_SELF_SIGNED_SERIAL, PKI_CA_NAME_BLACKLIST\n\ntry:\n # available in python-2.5 and greater\n from hashlib import md5 as md5_constructor\nexcept ImportError:\n # compatibility fallback\n from md5 import new as md5_constructor\n\nlogger = getLogger(\"pki\")\n\ndef refresh_pki_metadata(ca_list):\n \"\"\"Refresh pki metadata (PKI storage directories and openssl configuration files)\n\n Each ca_list element is a dictionary:\n 'name': CA name\n \"\"\"\n \n # refresh directory structure\n dirs = { 'certs' : 0755,\n 'private': 0700,\n 'crl' : 0755,\n }\n \n try:\n # create base PKI directory if necessary\n if not os.path.exists(PKI_DIR):\n logger.info('Creating base PKI directory %s' % PKI_DIR)\n os.mkdir(PKI_DIR, 0700)\n \n # list of old CA directories for possible purging\n purge_dirs = set([os.path.join(PKI_DIR, d) for d in os.listdir(PKI_DIR)\n if os.path.isdir(os.path.join(PKI_DIR, d))])\n \n # loop over CAs and create necessary filesystem objects\n for ca in ca_list:\n ca_dir = os.path.join(PKI_DIR, ca.name)\n \n # create CA directory if necessary\n if not ca_dir in purge_dirs:\n logger.info(\"Creating base directory for new CA %s\" % ca.name)\n os.mkdir(ca_dir)\n \n # create nested directories for key storage with proper permissions\n for d, m in dirs.items():\n os.mkdir(os.path.join(ca_dir, d), m)\n \n initial_serial = 0x01\n \n try:\n if not ca.parent and int(PKI_SELF_SIGNED_SERIAL) > 0:\n initial_serial = PKI_SELF_SIGNED_SERIAL+1 \n except ValueError:\n logger.error( \"PKI_SELF_SIGNED_SERIAL failed conversion to int!\" )\n \n h2s = '%X' % initial_serial\n \n if len(h2s) % 2 == 1:\n h2s = '0' + h2s\n \n # initialize certificate serial number\n s = open(os.path.join(ca_dir, 'serial'), 'wb')\n s.write(h2s)\n s.close()\n \n logger.info(\"Initial serial number set to %s\" % h2s)\n \n # initialize CRL serial number\n s = open(os.path.join(ca_dir, 'crlnumber'), 'wb')\n s.write('01')\n s.close()\n \n # touch certificate index file\n open(os.path.join(ca_dir, 'index.txt'), 'wb').close()\n \n # do not delete existing CA dir\n purge_dirs.discard(ca_dir)\n \n # purge unused CA directories\n for d in purge_dirs:\n if os.path.isdir(d):\n # extra check in order to keep unrelated directory from recursive removal...\n # (in case if something wrong with paths)\n # probably can be removed when debugging will be finished\n if os.path.isfile(os.path.join(d, 'crlnumber')):\n logger.debug(\"Purging CA directory tree %s\" % d)\n rmtree(d)\n else:\n logger.warning('Directory %s does not contain any metadata, preserving it' % d)\n \n x509_list =[]\n for x509 in pki.models.x509Extension.objects.all():\n if x509.is_ca():\n x509.ca = 
True\n            else:\n                x509.ca = False\n            x509_list.append(x509)\n        \n        # render template and save result to openssl.conf\n        conf = render_to_string(PKI_OPENSSL_TEMPLATE, {'ca_list': ca_list, 'x509_extensions': x509_list,})\n        \n        f = open(PKI_OPENSSL_CONF, 'wb')\n        f.write(conf)\n        f.close()\n    except Exception, e:\n        logger.exception(\"Refreshing PKI metadata failed: %s\" % e)\n        return  # bail out so the success message below is not logged after a failure\n    \n    logger.info(\"Successfully finished PKI metadata refresh\")\n\nclass Openssl():\n    \"\"\"OpenSSL command and task wrapper class\n    \n    instance must be a CertificateAuthority or Certificate object.\n    \"\"\"\n    \n    def __init__(self, instance):\n        \"\"\"Initialize shared variables and verify instance type\"\"\"\n        \n        self.i = instance\n        self.subj = subject_for_object(self.i)\n        \n        if self.i.name in PKI_CA_NAME_BLACKLIST:\n            logger.error(\"Instance name '%s' is blacklisted!\" % self.i.name)\n            raise Exception(\"Instance name '%s' is blacklisted!\" % self.i.name)\n        \n        if self.i.parent != None:\n            self.parent_certs = os.path.join(PKI_DIR, self.i.parent.name, 'certs')\n            self.crl = os.path.join(PKI_DIR, self.i.parent.name, 'crl', '%s.crl.pem' % self.i.parent.name)\n        else:\n            self.parent_certs = os.path.join(PKI_DIR, self.i.name, 'certs')\n            self.crl = os.path.join(PKI_DIR, self.i.name, 'crl', '%s.crl.pem' % self.i.name)\n        \n        if isinstance(instance, pki.models.CertificateAuthority):\n            self.ca_dir = os.path.join(PKI_DIR, self.i.name)\n            self.key = os.path.join(self.ca_dir, 'private', '%s.key.pem' % self.i.name)\n            self.pkcs12 = False\n            self.i.subjaltname = ''\n        elif isinstance(instance, pki.models.Certificate):\n            if self.i.parent:\n                self.ca_dir = os.path.join(PKI_DIR, self.i.parent.name)\n            else:\n                self.ca_dir = os.path.join(PKI_DIR, \"_SELF_SIGNED_CERTIFICATES\")\n                if not os.path.exists(self.ca_dir):\n                    try:\n                        os.mkdir(self.ca_dir, 0755)\n                        os.mkdir(os.path.join(self.ca_dir, \"certs\"))\n                    except OSError, e:\n                        logger.exception(\"Failed to create directories for self-signed certificates %s\" % self.ca_dir)\n                        raise\n            \n            self.key = os.path.join(self.ca_dir, 'certs', '%s.key.pem' % self.i.name)\n            self.pkcs12 = os.path.join(self.ca_dir, 'certs', '%s.cert.p12' % self.i.name)\n            \n            if not self.i.subjaltname:\n                self.i.subjaltname = 'email:copy' \n        else:\n            raise Exception( \"Given object type is unknown!\" )\n        \n        if not self.i.crl_dpoints:\n            self.i.crl_dpoints = ''\n        \n        self.csr = os.path.join(self.ca_dir, 'certs', '%s.csr.pem' % self.i.name)\n        self.crt = os.path.join(self.ca_dir, 'certs', '%s.cert.pem' % self.i.name)\n        self.der = os.path.join(self.ca_dir, 'certs', '%s.cert.der' % self.i.name)\n        \n        ## Generate a random string as ENV variable name\n        self.env_pw = \"\".join(random.sample(string.letters+string.digits, 10))\n        \n    def exec_openssl(self, command, env_vars=None):\n        \"\"\"Run an openssl command.\n        \n        command is prefixed with openssl binary from PKI_OPENSSL_BIN\n        env_vars is a dict containing the set environment variables\n        \"\"\"\n        \n        c = [PKI_OPENSSL_BIN]\n        c.extend(command)\n        \n        # add PKI_DIR environment variable if caller did not set it\n        if env_vars:\n            env_vars.setdefault('PKI_DIR', PKI_DIR)\n        else:\n            env_vars = { 'PKI_DIR': PKI_DIR }\n        \n        proc = Popen( c, shell=False, env=env_vars, stdin=PIPE, stdout=PIPE, stderr=STDOUT )\n        stdout_value, stderr_value = proc.communicate()\n        \n        if proc.returncode != 0:\n            logger.error( 'openssl command \"%s\" failed with returncode %d' % (c[1], proc.returncode) )\n            logger.error( stdout_value )\n            \n            raise Exception( stdout_value )\n        else:\n            return stdout_value\n        \n    def generate_key(self):\n        \"\"\"RSA key generation.\n        \n        Key will be encrypted with des3 if passphrase is given.\n        
\"\"\"\n \n key_type = po = pf = ''\n \n if self.i.passphrase:\n key_type = '-des3'\n po = '-passout'\n pf = 'env:%s' % self.env_pw\n \n command = 'genrsa %s -out %s %s %s %s' % (key_type, self.key, po, pf, self.i.key_length)\n self.exec_openssl(command.split(), env_vars={ self.env_pw: str(self.i.passphrase) } )\n \n logger.debug(\"Finished %s bit private key generation\" % self.i.key_length)\n \n def generate_self_signed_cert(self):\n \"\"\"Generate a self signed root certificate.\n \n Serial is set to user specified value when PKI_SELF_SIGNED_SERIAL > 0\n \"\"\"\n \n logger.info(\"Generating new self-signed certificate (CN=%s, x509 extension=%s)\" % (self.i.common_name, self.i.extension))\n \n command = ['req', '-config', PKI_OPENSSL_CONF, '-verbose', '-batch', '-new', '-x509', '-subj', self.subj, '-days', str(self.i.valid_days), \\\n '-extensions', str(self.i.extension), '-key', self.key, '-out', self.crt, '-passin', 'env:%s' % self.env_pw]\n \n try:\n if PKI_SELF_SIGNED_SERIAL and int(PKI_SELF_SIGNED_SERIAL) > 0:\n command.extend( [ '-set_serial', str(PKI_SELF_SIGNED_SERIAL) ] )\n except ValueError, e:\n logger.error( \"Not setting inital serial number to %s. Fallback to random number\" % PKI_SELF_SIGNED_SERIAL )\n logger.error( e )\n \n env = { self.env_pw: str(self.i.passphrase), \"S_A_N\": self.i.subjaltname, \"C_D_P\": self.i.crl_dpoints }\n \n self.exec_openssl( command, env_vars=env )\n \n logger.info(\"Finished self-signed certificate creation\")\n \n def generate_csr(self):\n \"\"\"CSR (Certificate Signing Request) generation\"\"\"\n \n logger.info(\"Generating new CSR for %s\" % self.i.common_name )\n \n command = ['req', '-config', PKI_OPENSSL_CONF, '-new', '-batch', '-subj', self.subj, '-key', self.key, '-out', self.csr, \\\n '-days', str(self.i.valid_days), '-passin', 'env:%s' % self.env_pw] \n self.exec_openssl(command, env_vars={ self.env_pw: str(self.i.passphrase) })\n \n def generate_der_encoded(self):\n \"\"\"Generate a DER encoded certificate\"\"\"\n \n logger.info( 'Generating DER encoded certificate for %s' % self.i.common_name )\n \n command = 'x509 -in %s -out %s -outform DER' % (self.crt, self.der)\n self.exec_openssl(command.split())\n \n def remove_der_encoded(self):\n \"\"\"Remove a DER encoded certificate\"\"\"\n \n if os.path.exists(self.der):\n logger.info( 'Removal of DER encoded certificate for %s' % self.i.common_name )\n \n try:\n os.remove(self.der)\n except OSError, e:\n logger.error( \"Failed to remove %s\" % self.der )\n raise Exception( e )\n \n def generate_pkcs12_encoded(self):\n \"\"\"Generate a PKCS12 encoded certificate.\n \n Passphrase is required as empty passwords not work in batch mode.\n \"\"\"\n \n command = 'pkcs12 -export -in %s -inkey %s -out %s -passout env:%s' % (self.crt, self.key, self.pkcs12, self.env_pw)\n env_vars={ self.env_pw: str(self.i.pkcs12_passphrase), }\n \n if self.i.passphrase:\n key_pw = \"\".join(random.sample(string.letters+string.digits, 10))\n command += ' -passin env:%s' % key_pw\n env_vars[key_pw] = str(self.i.passphrase)\n \n self.exec_openssl(command.split(), env_vars)\n \n def remove_pkcs12_encoded(self):\n \"\"\"Remove a PKCS12 encoded certificate if it exists\"\"\"\n \n if self.pkcs12 and os.path.exists(self.pkcs12):\n logger.info( 'Removal of PKCS12 encoded certificate for %s' % self.i.name )\n \n os.remove(self.pkcs12)\n \n def remove_complete_certificate(self):\n \"\"\"Remove all files related to the given certificate.\n \n This includes the hash alias, key, csr and the certificate itself.\n \"\"\"\n \n 
self.remove_der_encoded()\n self.remove_pkcs12_encoded()\n \n hash = \"%s/%s.0\" % (self.parent_certs, self.get_hash_from_cert())\n if os.path.exists(hash):\n os.remove(hash)\n \n serial = \"%s/%s.pem\" % (self.parent_certs, self.get_serial_from_cert())\n if os.path.exists(serial):\n os.remove(serial)\n \n if os.path.exists(self.csr):\n os.remove(self.csr)\n \n if os.path.exists(self.key):\n os.remove(self.key)\n \n if os.path.exists(self.crt):\n os.remove(self.crt)\n \n def sign_csr(self):\n \"\"\"Sign the CSR.\n \n Certificate signing and hash creation in CA's certificate directory\n \"\"\"\n \n env = { self.env_pw: str(self.i.parent_passphrase), \"S_A_N\": self.i.subjaltname, \"C_D_P\": self.i.crl_dpoints}\n \n command = 'ca -config %s -name %s -batch -in %s -out %s -days %d -extensions %s -passin env:%s' % \\\n ( PKI_OPENSSL_CONF, self.i.parent.name, self.csr, self.crt, self.i.valid_days, self.i.extension, self.env_pw)\n \n self.exec_openssl(command.split(), env_vars=env)\n \n ## Get the just created serial\n if self.parent_certs:\n serial = self.get_serial_from_cert()\n hash = self.get_hash_from_cert()\n \n if os.path.exists('%s/%s.0' % (self.parent_certs, hash)):\n os.remove('%s/%s.0' % (self.parent_certs, hash))\n \n os.symlink('%s.pem' % serial, '%s/%s.0' % (self.parent_certs, hash))\n \n def revoke_certificate(self, ppf):\n \"\"\"Revoke a certificate.\n \n Requires the parents passphrase.\n \"\"\"\n \n ## Check if certificate is already revoked. May have happened during a incomplete transaction\n if self.get_revoke_status_from_cert():\n logger.info( \"Skipping revoke as it already happened\" )\n return True\n \n command = 'ca -config %s -name %s -batch -revoke %s -passin env:%s' % (PKI_OPENSSL_CONF, self.i.parent.name, self.crt, self.env_pw)\n self.exec_openssl(command.split(), env_vars={ self.env_pw: str(ppf) })\n \n def generate_crl(self, ca=None, pf=None):\n \"\"\"CRL (Certificate Revocation List) generation.\n \n Requires the Certificate Authority and the passphrase. \n \"\"\"\n \n crl = os.path.join(PKI_DIR, ca, 'crl', '%s.crl.pem' % ca)\n \n command = 'ca -config %s -name %s -gencrl -out %s -crldays 1 -passin env:%s' % (PKI_OPENSSL_CONF, ca, crl, self.env_pw)\n self.exec_openssl(command.split(), env_vars={ self.env_pw: str(pf) })\n \n def update_ca_chain_file(self):\n \"\"\"Build/update the CA chain.\n \n Generates a chain file containing all CA's required to verify the given certificate.\n \"\"\"\n \n ## Build list of parents\n chain = []\n chain_str = ''\n \n p = self.i.parent\n \n if self.i.parent == None:\n chain.append( self.i.name )\n else:\n chain.append( self.i.name )\n while p != None:\n chain.append(p.name)\n p = p.parent\n \n chain.reverse()\n \n chain_file = os.path.join( PKI_DIR, self.i.name, '%s-chain.cert.pem' % self.i.name )\n \n try:\n w = open(chain_file, 'w')\n \n for c in chain:\n cert_file = os.path.join( PKI_DIR, c, 'certs', '%s.cert.pem' % c )\n command = 'x509 -in %s' % cert_file\n output = self.exec_openssl(command.split())\n \n ## Get the subject to print it first in the chain file\n subj = subject_for_object(self.i)\n \n w.write( '%s\\n' % subj )\n w.write(output)\n \n w.close()\n except:\n raise Exception( 'Failed to write chain file!' 
)\n \n def get_serial_from_cert(self):\n \"\"\"Extract serial from certificate.\n \n Use openssl to get the serial number from a certificate.\n \"\"\"\n \n command = 'x509 -in %s -noout -serial' % self.crt\n output = self.exec_openssl(command.split())\n \n x = output.rstrip(\"\\n\").split('=')\n \n if (len(x[1]) > 2):\n sl = re.findall('[a-fA-F0-9]{2}', x[1].lower())\n return ':'.join(sl)\n \n return x[1].lower()\n \n def get_hash_from_cert(self):\n \"\"\"Extract hash from certificate.\n \n Use openssl to get the hash value from a certificate.\n \"\"\"\n \n command = 'x509 -hash -noout -in %s' % self.crt\n output = self.exec_openssl(command.split())\n \n return output.rstrip(\"\\n\")\n \n def get_revoke_status_from_cert(self):\n \"\"\"Get the revoke status from certificate.\n \n Certificate is revoked => True\n Certificate is active => False\n \"\"\"\n \n command = 'crl -text -noout -in %s' % self.crl\n output = self.exec_openssl(command.split())\n \n serial_re = re.compile('^\\s+Serial\\sNumber\\:\\s+(\\w+)')\n lines = output.split('\\n')\n \n for l in lines:\n if serial_re.match(l):\n if serial_re.match(l).group(1) == self.i.serial:\n logger.info( \"The certificate is revoked\" )\n return True\n \n return False\n \n def dump_certificate(self):\n \"\"\"Dump a certificate\"\"\"\n \n command = \"x509 -in %s -noout -text\" % self.crt\n output = self.exec_openssl(command.split())\n \n return \"%s\" % output\n \n def rollback(self):\n \"\"\"Rollback on failed operations\"\"\"\n \n pass\n" }, { "alpha_fraction": 0.7293666005134583, "alphanum_fraction": 0.7293666005134583, "avg_line_length": 26.473684310913086, "blob_id": "03f7528dcc925bf0120a177e528df3d2b057eb94", "content_id": "252d5454f4ff4f821e92c3fc735726f4dc375c09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "no_license", "max_line_length": 52, "num_lines": 19, "path": "/webadmin/core/admin.py", "repo_name": "darkfower/ops-manager", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom webadmin.core.models import Proj, Host, IDC, IP\n\nclass ProjAdmin(admin.ModelAdmin):\n actions = ['delete_selected']\n\nclass HostAdmin(admin.ModelAdmin):\n actions = ['delete_selected']\n\nclass IDCAdmin(admin.ModelAdmin):\n actions = ['delete_selected']\n\nclass IPAdmin(admin.ModelAdmin):\n actions = ['delete_selected']\n\nadmin.site.register(Proj, ProjAdmin)\nadmin.site.register(Host, HostAdmin)\nadmin.site.register(IDC, IDCAdmin)\nadmin.site.register(IP, IPAdmin)" }, { "alpha_fraction": 0.512230396270752, "alphanum_fraction": 0.5149804353713989, "avg_line_length": 36.548912048339844, "blob_id": "b8b08c33a4f66b9f4dc6d82345758a8c9c14a794", "content_id": "5af6d3494efbc6809ab684141fee8886500192e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6909, "license_type": "no_license", "max_line_length": 179, "num_lines": 184, "path": "/webadmin/pki/helper.py", "repo_name": "darkfower/ops-manager", "src_encoding": "UTF-8", "text": "import os\nimport tempfile\nimport random\nimport zipfile\nimport logging\nimport string\n\nfrom django.utils.safestring import mark_safe\nfrom django.core import urlresolvers\n\nfrom pki.settings import PKI_DIR, PKI_BASE_URL, MEDIA_URL\nimport pki.models\n\nlogger = logging.getLogger(\"pki\")\n\ndef get_pki_icon_html(img, title=\"\", css=\"centered\", id=\"\"):\n \"\"\"Return HTML for given image.\n \n Can add optional alt and title parameters.\n \"\"\"\n \n if css:\n 
css_class = 'class=%s' % css\n else:\n css_class = ''\n \n img_path = os.path.join(PKI_BASE_URL, MEDIA_URL, 'pki/img', img)\n return '<img id=\"%s\" %s src=\"%s\" alt=\"%s\" title=\"%s\"/>' % (id, css_class, img_path, title, title)\n\ndef files_for_object(obj):\n \"\"\"Return files associated with object.\n \n Return dict containing all files associated with object. Dict contains\n chain, crl, pem, csr, der, pkcs12 and key\n \"\"\"\n \n if isinstance(obj, pki.models.CertificateAuthority):\n chain = c_name = obj.name\n ca_dir = os.path.join(PKI_DIR, obj.name)\n key_loc = os.path.join(ca_dir, 'private')\n elif isinstance(obj, pki.models.Certificate):\n if obj.parent:\n chain = obj.parent.name\n ca_dir = os.path.join(PKI_DIR, obj.parent.name)\n else:\n chain = obj.name\n ca_dir = os.path.join(PKI_DIR, '_SELF_SIGNED_CERTIFICATES')\n \n c_name = obj.name\n key_loc = os.path.join(ca_dir, 'certs')\n else:\n raise Exception( \"Given object type is unknown!\" )\n \n files = { 'chain' : { 'path': os.path.join(ca_dir, '%s-chain.cert.pem' % chain),\n 'name': '%s-chain.cert.pem' % chain,\n },\n 'crl' : { 'path': os.path.join(ca_dir, 'crl', '%s.crl.pem' % chain),\n 'name': '%s.crl.pem' % chain,\n },\n 'pem' : { 'path': os.path.join(ca_dir, 'certs', '%s.cert.pem' % c_name),\n 'name': '%s.cert.pem' % c_name,\n },\n 'csr' : { 'path': os.path.join(ca_dir, 'certs', '%s.csr.pem' % c_name),\n 'name': '%s.csr.pem' % c_name,\n },\n 'der' : { 'path': os.path.join(ca_dir, 'certs', '%s.cert.der' % c_name),\n 'name': '%s.cert.der' % c_name,\n },\n 'pkcs12': { 'path': os.path.join(ca_dir, 'certs', '%s.cert.p12' % c_name),\n 'name': '%s.cert.p12' % c_name,\n },\n 'key' : { 'path': os.path.join(ca_dir, key_loc, '%s.key.pem' % c_name),\n 'name': '%s.key.pem' % c_name,\n },\n }\n \n return files\n\ndef subject_for_object(obj):\n \"\"\"Return a subject string.\n \n An OpenSSL-compatible subject string is returned.\n \"\"\"\n \n subj = '/CN=%s/C=%s/ST=%s/localityName=%s/O=%s' % ( obj.common_name,\n obj.country,\n obj.state,\n obj.locality,\n obj.organization,\n )\n \n if obj.OU:\n subj += '/organizationalUnitName=%s' % obj.OU\n \n if obj.email:\n subj += '/emailAddress=%s' % obj.email\n \n return subj\n\ndef chain_recursion(r_id, store, id_dict):\n \"\"\"Helper function for recursion\"\"\"\n \n i = pki.models.CertificateAuthority.objects.get(pk=r_id)\n \n div_content = build_delete_item(i)\n store.append( mark_safe('Certificate Authority: <a href=\"%s\">%s</a> <img src=\"%spki/img/plus.png\" class=\"switch\" /><div class=\"details\">%s</div>' % \\\n (urlresolvers.reverse('admin:pki_certificateauthority_change', args=(i.pk,)), i.name, MEDIA_URL, div_content)) )\n \n id_dict['ca'].append(i.pk)\n \n ## Search for child certificates\n child_certs = pki.models.Certificate.objects.filter(parent=r_id)\n if child_certs:\n helper = []\n for cert in child_certs:\n div_content = build_delete_item(cert)\n helper.append( mark_safe('Certificate: <a href=\"%s\">%s</a> <img src=\"%spki/img/plus.png\" class=\"switch\" /><div class=\"details\">%s</div>' % \\\n (urlresolvers.reverse('admin:pki_certificate_change', args=(cert.pk,)), cert.name, MEDIA_URL, div_content)) )\n id_dict['cert'].append(cert.pk)\n store.append(helper)\n \n ## Search for related CAs\n child_cas = pki.models.CertificateAuthority.objects.filter(parent=r_id)\n if child_cas:\n helper = []\n for ca in child_cas:\n chain_recursion(ca.pk, helper, id_dict)\n store.append(helper)\n\ndef build_delete_item(obj):\n \"\"\"Build div tag for delete details\"\"\"\n \n parent = 'None'\n if 
obj.parent is not None:\n parent = obj.parent.name\n \n return \"<ul><li>Serial: %s</li><li>Subject: %s</li><li>Parent: %s</li><li>Description: %s</li><li>x509 Extension: %s</li><li>Created: %s</li><li>Expiry date: %s</li></ul>\" % \\\n ( obj.serial, subject_for_object(obj), parent, obj.description, obj.extension, obj.created, obj.expiry_date)\n\ndef generate_temp_file():\n \"\"\"Generate a filename in the system's temp directory\"\"\"\n \n f = os.path.join(tempfile.gettempdir(), \"\".join(random.sample(string.letters+string.digits, 25)))\n \n if os.path.exists(f):\n raise Exception( \"The generated temp file %s already exists!\" % f )\n \n return f\n\ndef build_zip_for_object(obj, request):\n \"\"\"Build zip with files of object.\n \n request is required to check permissions. Zip file path is returned.\n \"\"\"\n \n try:\n base_folder = 'PKI_DATA_%s' % obj.name\n files = files_for_object(obj)\n zip_f = generate_temp_file()\n \n c_zip = zipfile.ZipFile(zip_f, 'w')\n \n c_zip.write(files['key']['path'], files['key']['name'])\n c_zip.write(files['pem']['path'], files['pem']['name'])\n \n if isinstance(obj, pki.models.CertificateAuthority) or obj.parent:\n c_zip.write(files['chain']['path'], files['chain']['name'])\n c_zip.write(files['crl']['path'], files['crl']['name'])\n \n try:\n if obj.pkcs12_encoded:\n c_zip.write(files['pkcs12']['path'], files['pkcs12']['name'])\n except AttributeError:\n pass\n \n if obj.der_encoded:\n c_zip.write(files['der']['path'], files['der']['name'])\n \n c_zip.close()\n except Exception, e:\n logger.error(\"Exception during zip file creation: %s\" % e)\n raise Exception(e)\n \n return zip_f\n" }, { "alpha_fraction": 0.5635948777198792, "alphanum_fraction": 0.5677007436752319, "avg_line_length": 37.591548919677734, "blob_id": "12eac588a3f2924b22d2c7748da1546fc3560750", "content_id": "d25f264c552e2df0df85041db5a2188bee8aa672", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10960, "license_type": "no_license", "max_line_length": 157, "num_lines": 284, "path": "/webadmin/pki/views.py", "repo_name": "darkfower/ops-manager", "src_encoding": "UTF-8", "text": "import os\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseBadRequest\nfrom django.utils.safestring import mark_safe\nfrom django.template import RequestContext\nfrom django.core import urlresolvers\n\nfrom pki.settings import PKI_LOG, MEDIA_URL, PKI_ENABLE_GRAPHVIZ, PKI_ENABLE_EMAIL\nfrom pki.models import CertificateAuthority, Certificate\nfrom pki.forms import DeleteForm\nfrom pki.graphviz import ObjectChain, ObjectTree\nfrom pki.email import SendCertificateData\nfrom pki.helper import files_for_object, chain_recursion, build_delete_item, generate_temp_file, build_zip_for_object\nfrom pki.openssl import refresh_pki_metadata\n\nlogger = logging.getLogger(\"pki\")\n\n##------------------------------------------------------------------##\n## Download views\n##------------------------------------------------------------------##\n\n@login_required\ndef pki_download(request, model, id):\n \"\"\"Download PKI data.\n \n Type (ca/cert) and ID are used to determine the object to download.\n \"\"\"\n \n if not request.user.has_perm('pki.can_download'):\n messages.error(request, \"Permission denied!\")\n return 
HttpResponseRedirect(urlresolvers.reverse('admin:pki_%s_changelist' % model))\n \n if model == \"certificateauthority\":\n c = get_object_or_404(CertificateAuthority, pk=id)\n elif model == \"certificate\":\n c = get_object_or_404(Certificate, pk=id)\n else:\n logger.error( \"Unsupported type %s requested!\" % model )\n return HttpResponseBadRequest()\n \n if not c.active:\n raise Http404\n \n zip_file = build_zip_for_object(c, request)\n \n ## open and read the file if it exists\n if os.path.exists(zip_file):\n f = open(zip_file)\n x = f.readlines()\n f.close()\n \n ## return the HTTP response\n response = HttpResponse(x, mimetype='application/force-download')\n response['Content-Disposition'] = 'attachment; filename=\"PKI_DATA_%s.zip\"' % c.name\n \n return response\n else:\n logger.error( \"File not found: %s\" % zip_file )\n raise Http404\n\n##------------------------------------------------------------------##\n## Graphviz views\n##------------------------------------------------------------------##\n\n@login_required\ndef pki_chain(request, model, id):\n \"\"\"Display the CA chain as PNG.\n \n Requires PKI_ENABLE_GRAPHVIZ set to true. Type (ca/cert) and ID are used to determine the object.\n Create object chain PNG using graphviz and return it to the user.\n \"\"\"\n \n if PKI_ENABLE_GRAPHVIZ is not True:\n messages.warning(request, \"Chain view is disabled unless setting PKI_ENABLE_GRAPHVIZ is set to True\")\n return HttpResponseRedirect(urlresolvers.reverse('admin:pki_%s_changelist' % model))\n \n if model == \"certificateauthority\":\n obj = get_object_or_404(CertificateAuthority, pk=id)\n elif model == \"certificate\":\n obj = get_object_or_404(Certificate, pk=id)\n \n png = generate_temp_file()\n ObjectChain(obj, png)\n \n try:\n if os.path.exists(png):\n f = open(png)\n x = f.read()\n f.close()\n os.remove(png)\n except OSError, e:\n logger.error( \"Failed to load dependency tree: %s\" % e)\n raise Exception( e )\n \n response = HttpResponse(x, mimetype='image/png')\n return response\n\n@login_required\ndef pki_tree(request, id):\n \"\"\"Display the CA tree as PNG.\n \n Requires PKI_ENABLE_GRAPHVIZ set to true. Only works for Certificate Authorities.\n All objects related to the CA obj are fetched and displayed in a Graphviz tree.\n \"\"\"\n \n if PKI_ENABLE_GRAPHVIZ is not True:\n messages.warning(request, \"Tree view is disabled unless setting PKI_ENABLE_GRAPHVIZ is set to True\")\n return HttpResponseRedirect(urlresolvers.reverse('admin:pki_certificateauthority_changelist'))\n \n obj = get_object_or_404(CertificateAuthority, pk=id)\n png = generate_temp_file()\n \n ObjectTree(obj, png)\n \n try:\n if os.path.exists(png):\n f = open(png)\n x = f.read()\n f.close()\n \n os.remove(png)\n except OSError, e:\n logger.error( \"Failed to load dependency tree: %s\" % e)\n raise Exception( e )\n \n response = HttpResponse(x, mimetype='image/png')\n return response\n\n##------------------------------------------------------------------##\n## Email views\n##------------------------------------------------------------------##\n\n@login_required\ndef pki_email(request, model, id):\n \"\"\"Send email with certificate data attached.\n \n Requires PKI_ENABLE_EMAIL set to true. 
Type (ca/cert) and ID are used to determine the object.\n Build ZIP, send email and return to changelist.\n \"\"\"\n \n if PKI_ENABLE_EMAIL is not True:\n messages.warning(request, \"Email delivery is disabled unless setting PKI_ENABLE_EMAIL is set to True\")\n return HttpResponseRedirect(urlresolvers.reverse('admin:pki_%s_changelist' % model))\n \n if model == \"certificateauthority\":\n obj = get_object_or_404(CertificateAuthority, pk=id)\n elif model == \"certificate\":\n obj = get_object_or_404(Certificate, pk=id)\n \n if obj.email and obj.active:\n SendCertificateData(obj, request)\n else:\n raise Http404\n \n messages.info(request, 'Email to \"%s\" was sent successfully.' % obj.email)\n return HttpResponseRedirect(urlresolvers.reverse('admin:pki_%s_changelist' % model))\n\n##------------------------------------------------------------------##\n## Management views\n##------------------------------------------------------------------##\n\n@login_required\ndef pki_refresh_metadata(request):\n \"\"\"Rebuild PKI metadata.\n \n Renders openssl.conf template and cleans PKI_DIR.\n \"\"\"\n \n ca_objects = list(CertificateAuthority.objects.all())\n refresh_pki_metadata(ca_objects)\n messages.info(request, 'Successfully refreshed PKI metadata (%d certificate authorities)' % len(ca_objects))\n \n back = request.META.get('HTTP_REFERER', None) or '/'\n return HttpResponseRedirect(back)\n\n##------------------------------------------------------------------##\n## Admin views\n##------------------------------------------------------------------##\n\n@login_required\ndef admin_history(request, model, id):\n \"\"\"Overwrite the default admin history view\"\"\"\n \n from django.contrib.contenttypes.models import ContentType\n from pki.models import PkiChangelog\n \n ct = ContentType.objects.get(model=model)\n model_obj = ct.model_class()\n obj = model_obj.objects.get(pk=id)\n \n changelogs = PkiChangelog.objects.filter(model_id=ct.pk).filter(object_id=id)\n \n return render_to_response('admin/pki/object_changelogs.html', { 'changelogs': changelogs, 'title': \"Change history: %s\" % obj.common_name,\n 'app_label': model_obj._meta.app_label, 'object': obj,\n 'module_name': model_obj._meta.verbose_name_plural,\n }, RequestContext(request))\n\n@login_required\ndef admin_delete(request, model, id):\n \"\"\"Overwrite the default admin delete view\"\"\"\n \n deleted_objects = []\n parent_object_name = CertificateAuthority._meta.verbose_name\n title = 'Are you sure?'\n \n if model == 'certificateauthority':\n ## Get the list of objects to delete as list of lists\n item = get_object_or_404(CertificateAuthority, pk=id)\n chain_recursion(item.id, deleted_objects, id_dict={ 'cert': [], 'ca': [], })\n \n ## Fill the required data for delete_confirmation.html template\n opts = CertificateAuthority._meta\n object = item.name\n initial_id = False\n \n ## Set the CA to verify the passphrase against\n if item.parent_id:\n initial_id = item.parent_id\n auth_object = CertificateAuthority.objects.get(pk=item.parent_id).name\n else:\n initial_id = item.pk\n auth_object = item.name\n elif model == 'certificate':\n ## Fetch the certificate data\n try:\n item = Certificate.objects.select_related().get(pk=id)\n except:\n raise Http404\n \n if not item.parent_id:\n parent_object_name = \"self-signed certificate\"\n initial_id = item.id\n authentication_obj = item.name\n else:\n initial_id = item.parent_id\n authentication_obj = item.parent.name\n \n div_content = build_delete_item(item)\n deleted_objects.append( 
mark_safe('Certificate: <a href=\"%s\">%s</a> <img src=\"%spki/img/plus.png\" class=\"switch\" /><div class=\"details\">%s</div>' % \\\n (urlresolvers.reverse('admin:pki_certificate_change', args=(item.pk,)), item.name, MEDIA_URL, div_content)) )\n \n ## Fill the required data for delete_confirmation.html template\n opts = Certificate._meta\n object = item.name\n \n ## Set the CA to verify the passphrase against\n auth_object = authentication_obj\n \n if request.method == 'POST':\n form = DeleteForm(request.POST)\n \n if form.is_valid():\n item.delete(request.POST['passphrase'])\n messages.info(request, 'The %s \"%s\" was deleted successfully.' % (opts.verbose_name, object))\n return HttpResponseRedirect(urlresolvers.reverse('admin:pki_%s_changelist' % model))\n else:\n form = DeleteForm()\n \n form.fields['_model'].initial = model\n form.fields['_id'].initial = id\n \n return render_to_response('admin/pki/delete_confirmation.html', { 'deleted_objects': deleted_objects, 'object_name': opts.verbose_name,\n 'app_label': opts.app_label, 'opts': opts, 'object': object, 'form': form,\n 'auth_object': auth_object, 'parent_object_name': parent_object_name,\n 'title': title,\n }, RequestContext(request))\n\n##------------------------------------------------------------------##\n## Exception viewer\n##------------------------------------------------------------------##\n\n@login_required\ndef show_exception(request):\n \"\"\"Render error page and fill it with the PKI_LOG content\"\"\"\n \n f = open(PKI_LOG, 'r')\n log = f.readlines()\n f.close()\n \n return render_to_response('500.html', {'log': log})\n" }, { "alpha_fraction": 0.4660579562187195, "alphanum_fraction": 0.4727272689342499, "avg_line_length": 52.82478713989258, "blob_id": "aedd9c322f6304a799ce85d38c7faa0b71eb08ef", "content_id": "06ec3ebd76b03b29665d1ac9c985d5bf72fb3981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12595, "license_type": "no_license", "max_line_length": 191, "num_lines": 234, "path": "/webadmin/pki/admin.py", "repo_name": "darkfower/ops-manager", "src_encoding": "UTF-8", "text": "import os\nimport logging\n\nfrom django.contrib import admin, messages\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect\n\nfrom pki.models import CertificateAuthority, Certificate, x509Extension\nfrom pki.forms import CertificateAuthorityForm, CertificateForm, x509ExtensionForm\nfrom pki.views import admin_delete, admin_history\nfrom pki.settings import PKI_DIR, PKI_LOG, PKI_LOGLEVEL, JQUERY_URL\n\n##------------------------------------------------------------------##\n## Create PKI_DIR if it's missing\n##------------------------------------------------------------------##\n\nif not os.path.exists( PKI_DIR ):\n try:\n os.mkdir( PKI_DIR, 0750 )\n except OSError, e:\n print \"Failed to create PKI_DIR %s: %s\" % (PKI_DIR, e)\n\n##------------------------------------------------------------------##\n## Initialize logging\n##------------------------------------------------------------------##\n\nLOG_LEVELS = { 'debug' : logging.DEBUG,\n 'info' : logging.INFO,\n 'warning' : logging.WARNING,\n 'error' : logging.ERROR,\n 'critical' : logging.CRITICAL\n }\n\nlogger = logging.getLogger(\"pki\")\n\nl_hdlr = logging.FileHandler(PKI_LOG)\nl_hdlr.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s - %(module)s.%(funcName)s > %(message)s\"))\n\nif PKI_LOGLEVEL in LOG_LEVELS:\n 
logger.setLevel(LOG_LEVELS[PKI_LOGLEVEL])\n\nlogger.addHandler(l_hdlr)\n\n##---------------------------------##\n## Interface setup\n##---------------------------------##\n\n## Disable delete_selected\nadmin.site.disable_action('delete_selected')\n\nclass CertificateBaseAdmin(admin.ModelAdmin):\n \"\"\"Base class for Certificate* Admin models\"\"\"\n \n save_on_top = True\n actions = []\n list_per_page = 25\n \n class Media:\n js = ( JQUERY_URL, 'pki/js/jquery.tipsy.js', 'pki/js/pki_admin.min.js', )\n css = { 'screen': ( 'pki/css/pki.css', 'pki/css/tipsy.css', ), }\n \n def save_model(self, request, obj, form, change):\n \"\"\"Override builtin save_model function to pass user to model save\"\"\"\n \n obj.user = request.user\n obj.save()\n\nclass Certificate_Authority_Admin(CertificateBaseAdmin):\n \"\"\"CertificateAuthority admin definition\"\"\"\n \n form = CertificateAuthorityForm\n list_display = ( 'id', 'common_name', 'Serial_align_right', 'Valid_center', 'Chain_link', 'Tree_link', 'Parent_link',\n 'Expiry_date', 'Description', 'Creation_date', 'Revocation_date', 'Child_certs', 'Download_link', 'Email_link', )\n list_display_links = ( 'common_name', )\n list_filter = ( 'parent', 'active', 'extension', )\n radio_fields = { \"action\": admin.VERTICAL }\n search_fields = [ 'name', 'common_name', 'description' ]\n date_hierarchy = 'created'\n readonly_fields = ( 'Expiry_date', 'Creation_date', 'Revocation_date', 'serial', 'Chain', 'Certificate_Dump', 'CA_Clock', 'State', )\n fieldsets = ( ( 'Define action', { 'fields': ( 'action', ), }, ),\n ( 'Documentation', { 'fields': ( 'description', ),\n 'classes': [ 'wide', ],\n },\n ),\n ( 'Certificate Dump', { 'fields': ( 'Certificate_Dump', ),\n 'classes': [ 'collapse', 'wide', ],\n },\n ),\n ( 'Certificate', { 'fields': ( 'State', 'common_name', 'name', 'country', 'state', 'locality', 'organization',\n 'OU', 'email', 'key_length', 'valid_days', 'extension', 'passphrase', 'passphrase_verify',\n 'serial', 'Expiry_date', 'Creation_date', 'Revocation_date',\n ),\n 'classes': [ 'wide', ],\n },\n ),\n ( 'Encoding options', { 'fields': ( 'der_encoded', ), },\n ),\n ( 'Certificate signing', { 'fields': ( 'CA_Clock', 'Chain', 'parent', 'parent_passphrase', 'crl_dpoints', 'policy', ),\n 'classes': [ 'wide', ],\n },\n ),\n )\n \n def delete_view(self, request, object_id, extra_context=None):\n return admin_delete(request, self.model._meta.module_name, object_id)\n \n def history_view(self, request, object_id, extra_context=None):\n return admin_history(request, self.model._meta.module_name, object_id)\n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n \"\"\"Filter foreign key parent field.\n \n Skip CAs that don't have a matching x509 extension or are not active.\n \"\"\"\n \n if db_field.name == \"parent\":\n kwargs[\"queryset\"] = CertificateAuthority.objects.filter(extension__basic_constraints__contains=\"CA:TRUE\", active=True).exclude(extension__basic_constraints__contains=\"pathlen:0\")\n return db_field.formfield(**kwargs)\n elif db_field.name == \"extension\":\n kwargs[\"queryset\"] = x509Extension.objects.filter(basic_constraints__contains=\"CA:TRUE\", key_usage__name__contains=\"keyCertSign\")\n return db_field.formfield(**kwargs)\n \n return super(Certificate_Authority_Admin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n \nadmin.site.register(CertificateAuthority, Certificate_Authority_Admin)\n\nclass Certificate_Admin(CertificateBaseAdmin):\n \"\"\"Certificate admin definition\"\"\"\n form = 
CertificateForm\n list_display = ( 'id', 'common_name', 'Serial_align_right', 'Valid_center', 'Chain_link', 'Parent_link',\n 'Expiry_date', 'Description', 'Creation_date', 'Revocation_date', 'Download_link', 'Email_link' )\n list_display_links = ( 'common_name', )\n radio_fields = { \"action\": admin.VERTICAL }\n list_filter = ( 'parent', 'active', 'extension', )\n search_fields = [ 'name', 'description' ]\n date_hierarchy = 'created'\n readonly_fields = ( 'Expiry_date', 'Creation_date', 'Revocation_date', 'serial', 'Chain', 'Certificate_Dump', 'CA_Clock', 'State', )\n fieldsets = ( ( 'Define action', { 'fields': ( 'action', ) } ),\n ( 'Documentation', { 'fields': ( 'description', ),\n 'classes': [ 'wide', ],\n },\n ),\n ( 'Certificate Dump', { 'fields': ( 'Certificate_Dump', ),\n 'classes': [ 'collapse', 'wide', ],\n },\n ),\n ( 'Certificate', { 'fields': ( 'State', 'common_name', 'name', 'country', 'state', 'locality', 'organization',\n 'OU', 'email', 'key_length', 'valid_days', 'extension',\n 'passphrase', 'passphrase_verify', 'serial', 'Expiry_date', 'Creation_date',\n 'Revocation_date',\n ),\n 'classes': [ 'wide', ],\n },\n ),\n ( 'Multi-domain / SubjectAltName', { 'fields': ( 'subjaltname', ),\n 'classes': [ 'wide', ],\n },\n ),\n ( 'Encoding options', { 'fields': ( 'der_encoded', 'pkcs12_encoded', 'pkcs12_passphrase', 'pkcs12_passphrase_verify', ),\n 'classes': [ 'wide', ],\n },\n ),\n ( 'Certificate signing', { 'fields': ( 'CA_Clock', 'Chain', 'parent', 'parent_passphrase', 'crl_dpoints', ),\n 'classes': [ 'wide', ],\n },\n ),\n )\n \n def delete_view(self, request, object_id, extra_context=None):\n return admin_delete(request, self.model._meta.module_name, object_id)\n \n def history_view(self, request, object_id, extra_context=None):\n return admin_history(request, self.model._meta.module_name, object_id)\n \n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n \"\"\"Filter foreign key parent field.\n \n Skip CAs that don't have a matching x509 extension or are not active.\n Skip x509 extensions that are not sufficient for end-user certificates.\n \"\"\"\n \n if db_field.name == \"parent\":\n kwargs[\"queryset\"] = CertificateAuthority.objects.filter(extension__basic_constraints__contains=\"CA:TRUE\", \\\n active=True).filter(extension__basic_constraints__contains=\"pathlen:0\")\n return db_field.formfield(**kwargs)\n elif db_field.name == \"extension\":\n kwargs[\"queryset\"] = x509Extension.objects.filter(Q(basic_constraints__contains=\"CA:FALSE\") | \\\n ((Q(basic_constraints__contains=\"CA:TRUE\") & \\\n Q(basic_constraints__contains=\"pathlen:0\")) & \\\n ~Q(key_usage__name__contains=\"keyCertSign\")))\n return db_field.formfield(**kwargs)\n \n return super(Certificate_Admin, self).formfield_for_foreignkey(db_field, request, **kwargs)\n \nadmin.site.register(Certificate, Certificate_Admin)\n\nclass x509Extension_Admin(CertificateBaseAdmin):\n \"\"\"Admin instance for x509 extensions\"\"\"\n \n form = x509ExtensionForm\n list_display = ( 'id', 'name', 'description', 'basic_constraints', 'key_usage_csv', 'ext_key_usage_csv', 'created', 'CrlDpoint_center', )\n list_display_links = ( 'name', )\n search_fields = [ 'name', 'description', ]\n date_hierarchy = 'created'\n fieldsets = ( ( 'X509 extension', { 'fields': ( 'name', 'description', 'basic_constraints', 'basic_constraints_critical', 'key_usage',\n 'key_usage_critical', 'extended_key_usage', 'extended_key_usage_critical', \n 'subject_key_identifier', 'authority_key_identifier', 'crl_distribution_point',\n ),\n 
'classes': [ 'wide', ],\n },\n ),\n )\n \n def save_model(self, request, obj, form, change):\n if change:\n request.user.get_and_delete_messages()\n else:\n obj.user = request.user\n obj.save()\n \n def delete_view(self, request, object_id, extra_context=None):\n x509 = x509Extension.objects.get(pk=object_id)\n if x509.certificateauthority_set.all() or x509.certificate_set.all():\n logger.error(\"x509 extension \\\"%s\\\" cannot be removed because it is in use!\" % x509.name)\n messages.error(request, 'x509 extension \"%s\" cannot be removed because it is in use!' % x509.name)\n return HttpResponseRedirect(\"../../\")\n else:\n return super(x509Extension_Admin, self).delete_view(request, object_id, extra_context)\n \n def response_change(self, request, obj):\n messages.warning(request, 'You cannot modify x509 extensions!')\n return HttpResponseRedirect(\"../\")\n \nadmin.site.register(x509Extension, x509Extension_Admin)\n" } ]
5
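The openssl.py excerpt in the record above drives every operation through self.exec_openssl(command, env_vars=...), whose definition lies outside this excerpt. The following is an illustrative sketch only, not the project's actual implementation, of what such a wrapper could look like, assuming the openssl binary is on PATH, that passphrases travel only through environment variables (matching the -passin env:<name> arguments built above), and that stdout is returned as text:

    import os
    import subprocess

    def exec_openssl(command, env_vars=None):
        """Run an openssl subcommand and return its stdout (illustrative sketch)."""
        env = os.environ.copy()
        if env_vars:
            env.update(env_vars)  # e.g. { env_pw_name: passphrase } consumed by -passin env:<name>
        proc = subprocess.Popen(['openssl'] + list(command),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        out, err = proc.communicate()
        if proc.returncode != 0:
            raise Exception('openssl failed: %s' % err)
        return out

Passing secrets through env_vars rather than on the command line keeps passphrases out of the process list, which matches how the methods above assemble their commands.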
Stefany-Olivera/Python_Project
https://github.com/Stefany-Olivera/Python_Project
8a115ee79f5e06d83e68d24520f73cd3fc921358
cea3ea3f205d6ec7bb8de8817fd9fed5cd79373a
b3ccc951df9fe4a3e76476f5f3f460ea9a633bb6
refs/heads/master
2021-05-04T04:25:42.032523
2018-02-05T16:44:20
2018-02-05T16:44:20
120,332,922
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6310122013092041, "alphanum_fraction": 0.6338837146759033, "avg_line_length": 30.670454025268555, "blob_id": "5c0a7e1218e360402e3a37ea9587041f8577fb24", "content_id": "b236275191e94c335d3d8d74d1c921b49e95921c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2786, "license_type": "no_license", "max_line_length": 79, "num_lines": 88, "path": "/apps/friend2_app/views.py", "repo_name": "Stefany-Olivera/Python_Project", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom django.contrib import messages\nfrom django.shortcuts import render, redirect, HttpResponse\nfrom .models import User, Friend\n\ndef index(request):\n # print(User.objects.all())\n return render(request, 'friend2_app/index.html')\n\ndef register(request):\n # print request.POST\n response = User.objects.register(\n name = request.POST[\"name\"],\n alias = request.POST[\"alias\"],\n email = request.POST[\"email\"],\n password = request.POST[\"password\"],\n confirm_password = request.POST[\"confirm_password\"],\n bday = request.POST[\"bday\"],\n )\n \n if response['valid']:\n messages.add_message(request, messages.SUCCESS, 'Welcome to the site!')\n request.session[\"user_id\"]=response[\"user\"].id\n request.session[\"name\"]=response[\"user\"].name\n return redirect(\"/friends\")\n else:\n for error_message in response[\"errors\"]:\n messages.add_message(request, messages.ERROR, error_message)\n return redirect(\"/\")\n \ndef login(request):\n response = User.objects.login(\n email = request.POST[\"email\"],\n password = request.POST[\"password\"]\n )\n if response['valid']:\n messages.add_message(request, messages.SUCCESS, 'See you soon!')\n request.session[\"user_id\"]=response[\"user\"].id\n request.session[\"name\"]=response[\"user\"].name\n return redirect(\"/friends\")\n else:\n for error_message in response[\"errors\"]:\n messages.add_message(request, messages.ERROR, error_message)\n\n return redirect(\"/\")\n\ndef dashboard(request):\n person = User.objects.get(id=request.session['user_id'])\n users = User.objects.all()\n others = []\n for otheruser in users:\n if (otheruser.id != request.session['user_id']):\n others.append(otheruser)\n \n friends = Friend.objects.filter(friend1=person)\n friendship = []\n for friend in friends:\n friendship.append(friend.friend2)\n others2 = []\n for otheruser in others:\n if (otheruser not in friendship):\n others2.append(otheruser)\n \n context = {\n 'person' : person,\n 'users' : others2,\n 'friends' : friendship\n }\n return render(request, 'friend2_app/dashboard.html', context)\n\ndef profile(request, id):\n show = User.objects.get(id=id)\n context = {\n 'user' : show\n }\n return render(request, 'friend2_app/profile.html', context)\n\ndef addfriend(request, id):\n User.themanager.add(request.session['user_id'], id)\n return redirect('/friends')\n\ndef removefriend(request, id):\n User.themanager.remove(request.session['user_id'], id)\n return redirect('/friends')\n\ndef logout(request):\n request.session.clear()\n return redirect('/')" }, { "alpha_fraction": 0.5586145520210266, "alphanum_fraction": 0.5681616067886353, "avg_line_length": 34.46456527709961, "blob_id": "8ac691a7415589cb1edf127bc81a55880b6a97c5", "content_id": "4790358448317265049e38e9271d79c530cc80de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4504, "license_type": "no_license", "max_line_length": 86, "num_lines": 127, "path": 
"/apps/friend2_app/models.py", "repo_name": "Stefany-Olivera/Python_Project", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\nfrom django.db import models\nimport re\nimport bcrypt\nfrom datetime import datetime\n\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')\n\nclass UserManager(models.Manager):\n def login(self, email, password):\n \n response={\n \"errors\":[],\n \"user\":None,\n \"valid\":True\n }\n\n if len(email) < 1:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Email is required\")\n \n elif not EMAIL_REGEX.match(email):\n response[\"valid\"] = False\n response[\"errors\"].append(\"Invalid Email\")\n else:\n list_of_emails = User.objects.filter(email=email.lower())\n if len(list_of_emails) == 0:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Email does not exist\")\n if len(password) < 8:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Password must be 8 characters or more\")\n \n if response[\"valid\"]:\n if bcrypt.checkpw(password.encode(), list_of_emails[0].password.encode()):\n response[\"user\"] = list_of_emails[0]\n else:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Incorrect Password\")\n return response\n \n def register(self, name, alias, email, password, confirm_password, bday):\n now=datetime.now()\n \n response={\n \"errors\":[],\n \"user\": None,\n \"valid\": True\n }\n if len(name) < 1:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Name is required\")\n\n if len(alias) <1:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Alias is required\")\n\n if len(email) < 1:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Email is required\")\n \n elif not EMAIL_REGEX.match(email):\n response[\"valid\"] = False\n response[\"errors\"].append(\"Invalid Email\")\n else:\n list_of_emails=User.objects.filter(email=email)\n if len(list_of_emails) > 0:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Email already exists\")\n if len(password) < 8:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Password must be 8 characters or more\")\n \n if confirm_password != password:\n response[\"valid\"] = False\n response[\"errors\"].append(\"Password must match Confirm Password\")\n if len(bday) < 1:\n response[\"valid\"]=False\n response[\"errors\"].append('Bday is required!')\n elif now < datetime.strptime(bday,'%Y-%m-%d'):\n response[\"valid\"]=False\n response[\"errors\"].append('Bday cant be in the future!')\n\n if response[\"valid\"]:\n response[\"user\"] = User.objects.create(\n name=name,\n alias=alias,\n email=email.lower(),\n password=bcrypt.hashpw(password.encode(), bcrypt.gensalt()),\n bday=bday,\n )\n\n\n return response\n def add(self, user_id, friend_id):\n user = self.get(id=user_id)\n friend = self.get(id=friend_id)\n Friend.objects.create(friend1=user, friend2=friend)\n Friend.objects.create(friend1=friend, friend2=user)\n\n def remove(self, user_id, friend_id):\n user = self.get(id=user_id)\n friend = self.get(id=friend_id)\n relationship1 = Friend.objects.get(friend1=user, friend2=friend)\n relationship2 = Friend.objects.get(friend1=friend, friend2=user)\n relationship1.delete()\n relationship2.delete()\n\nclass User(models.Model):\n name = models.CharField(max_length=255)\n alias = models.CharField(max_length=255)\n email = models.CharField(max_length=255)\n password = models.CharField(max_length=255)\n bday = models.DateField()\n created_at = models.DateTimeField(auto_now_add = True)\n 
updated_at = models.DateTimeField(auto_now=True)\n\n themanager = UserManager()\n objects = UserManager()\n\nclass Friend(models.Model):\n friend1 = models.ForeignKey(User, related_name='asks')\n friend2 = models.ForeignKey(User, related_name='accepts')\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n objects = models.Manager()\n" }, { "alpha_fraction": 0.7445255517959595, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 18.571428298950195, "blob_id": "24f641c8c85c8ce31e60ca75a784684b54c6eba8", "content_id": "dbb8afb54873ac5849bbb16eeb6f46daa4a84b80", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/apps/friend2_app/apps.py", "repo_name": "Stefany-Olivera/Python_Project", "src_encoding": "UTF-8", "text": "from __future__ import unicode_literals\n\nfrom django.apps import AppConfig\n\n\nclass Friend2AppConfig(AppConfig):\n name = 'friend2_app'\n" }, { "alpha_fraction": 0.5454071164131165, "alphanum_fraction": 0.56210857629776, "avg_line_length": 36.846153259277344, "blob_id": "70efeef2481b6b72dc5393de018d60ae5cb92abd", "content_id": "92b2e847f982e6bfb6dafaa2a9e1659f4d4af33d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1916, "license_type": "no_license", "max_line_length": 128, "num_lines": 52, "path": "/apps/friend2_app/migrations/0001_initial.py", "repo_name": "Stefany-Olivera/Python_Project", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10 on 2018-02-04 17:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.db.models.manager\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Friend',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=255)),\n ('alias', models.CharField(max_length=255)),\n ('email', models.CharField(max_length=255)),\n ('password', models.CharField(max_length=255)),\n ('bday', models.DateField()),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ],\n managers=[\n ('themanager', django.db.models.manager.Manager()),\n ],\n ),\n migrations.AddField(\n model_name='friend',\n name='friend1',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='asks', to='friend2_app.User'),\n ),\n migrations.AddField(\n model_name='friend',\n name='friend2',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accepts', to='friend2_app.User'),\n ),\n ]\n" } ]
4
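UserManager.register() and UserManager.login() in the record above rely on a bcrypt hash/verify round trip. A short illustrative sketch of that round trip, using the same bcrypt package the models import (the literal values below are made up for the example):

    import bcrypt

    password = 'longenough'
    stored = bcrypt.hashpw(password.encode(), bcrypt.gensalt())  # what register() saves

    # what login() does against the stored hash
    assert bcrypt.checkpw(password.encode(), stored)
    assert not bcrypt.checkpw('wrong-guess'.encode(), stored)

Because gensalt() embeds a random salt inside the hash itself, checkpw() can verify a candidate password without any separately stored salt.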
Matyu/Wind-River-Project
https://github.com/Matyu/Wind-River-Project
3fc1e5a264d7236418f31745f2dd871f774a2799
0c93c2a370c5de359a0c73634861d9530e695ad2
2e41a539fb9a83a933d636a2e61632cde8b45fef
refs/heads/master
2021-01-01T16:25:49.863709
2013-02-06T21:00:34
2013-02-06T21:00:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6016594171524048, "alphanum_fraction": 0.6203871965408325, "avg_line_length": 20.629060745239258, "blob_id": "4415ea76088e6e211687f6f14d99b2f2524ca375", "content_id": "be604247da12bfe494e749a6d02507c16e7cc9a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 12877, "license_type": "no_license", "max_line_length": 134, "num_lines": 585, "path": "/tourelle.ino", "repo_name": "Matyu/Wind-River-Project", "src_encoding": "UTF-8", "text": "// Le code source d'origine est celui de XAVIER Hinault, voir le site http://mon-club-elec.fr/\n// Il y a un ajout de code qui sera indiqué dans le programme \n\n// ------- Licence du code de ce programme ----- \n\n// This program is free software: you can redistribute it and/or modify\n\n// it under the terms of the GNU General Public License as published by\n\n// the Free Software Foundation, either version 3 of the License,\n\n// or any later version.\n\n// This program is distributed in the hope that it will be useful,\n\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\n// GNU General Public License for more details.\n\n// You should have received a copy of the GNU General Public License\n\n// along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\n\n// //////////////////// PRESENTATION DU PROGRAMME //////////////////// \n\n\n\n// -------- Que fait ce programme ? ---------\n\n /* Un servomoteur connecté à la carte Arduino est contrôlé\n\nà l'aide d'un shield Joystick (DFRobot). \n\nL'appui sur un bouton poussoir permet la réinitialisation \n\nde la position du servomoteur. */ \n\n\n\n// --- Fonctionnalités utilisées --- \n\n\n\n// Utilise la conversion analogique numérique 10 bits \n\n// Utilise la carte d'extension Arduino (shield) Joystick + BP x 2\n\n// Utilise un servomoteur\n\n\n\n// -------- Circuit à réaliser --------- \n\n\n\n// ******* ATTENTION : il est possible de connecter directement 2 ou 3 servomoteurs sur la carte Arduino\n\n// Connecter un servomoteur sur la broche 8\n\n// Connecter un servomoteur dur la brche 9\n\n// Enficher la carte d'extension Arduino (shield) Joystick + BP x 2 broche à broche sur la carte EasyCard\n\n// Enficher la carte EasyCard broche à broche sur la carte Arduino\n\n// les connexions sont réalisées broche à broche entre le module et la carte Arduino\n\n\n\n\n\n// /////////////////////////////// 1. 
Declarations /////////////////////// \n\n// Included libraries, constants, variables and useful objects are declared here...\n\n\n\n// --- Constant declarations ---\n\n\n\n// --- Library includes ---\n\n\n\n#include <Servo.h> // servomotor library \n\n\n\n// --- Useful constants ---\nconst int APPUI=LOW; // constant used to test the push-button state\n\n\n\n//--- Constants used with the servomotor \nconst int ANGLE_MIN_TILT=0; // MIN position angle in degrees\nconst int POS_MIN_TILT=550; // pulse width for the servomotor's ANGLE_MIN degree position\n\nconst int ANGLE_MIN_PAN= 0;\nconst int POS_MIN_PAN= 550;\n\n // for example POS_MIN=600 for ANGLE_MIN=10° with a Futaba S3003 \n\n // or POS_MIN=550 for ANGLE_MIN=0 with a Futaba S3003\n\n\nconst int ANGLE_MAX_PAN = 180;\nconst int POS_MAX_PAN = 2450;\n\n// New constants added compared to the original code\nconst int ANGLE_MAX_TILT=180; // MAX position angle in degrees\n\nint POS_MAX_TILT=2450; // pulse width for the servomotor's ANGLE_MAX degree position\n\n // POS_MAX=2300 for ANGLE_MAX=170° for a Futaba S3003\n\n // or POS_MAX=2400 for ANGLE_MAX=172 for a Futaba S3003\n\n\n\n// to calibrate a servomotor, see: \n\n//http://www.mon-club-elec.fr/pmwiki_mon_club_elec/pmwiki.php?n=MAIN.ArduinoExpertSerieDepuisPCPositionServomoteur\n\n\n\n// --- Digital I/O pin constants ---\n\n\n\nconst int bpRouge=3; // Constant for pin 3\n\nconst int bpBleu=4; // Constant for pin 4\n\nconst int bpJoystick=5; // Constant for pin 5\n\n\n\nconst int broche_servoPan=8; // Constant for pin 8\n\nconst int broche_servoTilt=9; // Constant for pin 9\n\n\n\n// --- Analog pin constants ---\n\n\n\nconst int axe1Joystick=0; // Constant for analog pin 0\n\nconst int axe2Joystick=1; // Constant for analog pin 1\n\n\n\n// --- Global variable declarations ---\n\n\n\nint mesure_brute=0;// Variable holding the raw analog-to-digital conversion result\n\nfloat mesuref=0.0;// Variable holding the decimal conversion result\n\n\n\nint positionAxe1=0; // Raw ADC reading for Joystick axis 1\n\nint positionAxe2=0; // Raw ADC reading for Joystick axis 2\n\n\n\nint angleServoPan=90; // Pan servo position variable in degrees\n\nint angleServoTilt=90; // Tilt servo position variable in degrees\n\n\n\nint angleServoPan0=90; // last Pan servo position in degrees\n\nint angleServoTilt0=90; // last Tilt servo position in degrees\n\n\n\n// --- Declaration of objects used by the features of this program ---\n\n\n\n//--- Servomotor object creation \nServo mon_servoPan; // creates a servo object to control servomotor 1\n\nServo mon_servoTilt; // creates a servo object to control servomotor 2\n\n\n\n\n\n// ////////////////////////// 2. 
SETUP FUNCTION = initialization code ////////////////////////// \n\n// The setup() function runs first and only once, when the program starts\n\n\n\nvoid setup() { // start of the setup() function\n\n\n// Added: Serial initialization\nSerial.begin(115200);\n\n\n// --- instructions executed once at program start --- \n\n\n\n// ------- Initialization of the features used ------- \n\n\n\n//--- Servomotor initialization \n\nmon_servoPan.attach(broche_servoPan); // attaches the servo object to the Pan servomotor control pin\n\nmon_servoTilt.attach(broche_servoTilt); // attaches the servo object to the Tilt servomotor control pin\n\n\n\n\n\n// ------- Digital output pins ------- \n\n pinMode (broche_servoPan,OUTPUT); // Pin broche_servoPan configured as output\n\n pinMode (broche_servoTilt,OUTPUT); // Pin broche_servoTilt configured as output\n\n\n\n// ------- Digital input pins ------- \n\n pinMode (bpRouge,INPUT); // Pin bpRouge configured as input\n\n pinMode (bpBleu,INPUT); // Pin bpBleu configured as input\n\n pinMode (bpJoystick,INPUT); // Pin bpJoystick configured as input\n\n\n\n// ------- Enable the internal pull-ups on input pins if needed ------- \n\n\n\n// The push buttons of the Joystick + 2 BP shield already have a pull-up on the shield\n\n\n\n\n\n// ------- Initialization of the variables used ------- \n\n\n\n// ------- Useful initialization code ------- \n\n\n\nmon_servoPan.writeMicroseconds(anglePan(angleServoPan)); // builds the pulse from the angle value - more precise than write()\n\nmon_servoTilt.writeMicroseconds(angleTilt(angleServoTilt)); // builds the pulse from the angle value - more precise than write()\n\n\n\n\n\n\n\n} // end of the setup() function\n\n// ********************************************************************************\n\n// New conversion function, added compared to the original source code\n\nint convert(String str) {\n \n int nbre = 0; \n int tm = 1;\n \n for(int i = str.length() - 1; i >= 0; i--) {\n int e = 0; // default value, in case a non-digit character slips through\n \n switch(str[i]) {\n case '0':\n e = 0;\n break;\n case '1':\n e = 1;\n break;\n case '2':\n e = 2;\n break;\n case '3':\n e = 3;\n break;\n case '4':\n e = 4;\n break;\n case '5':\n e = 5;\n break;\n case '6':\n e = 6;\n break;\n case '7':\n e = 7;\n break;\n case '8':\n e = 8;\n break;\n case '9':\n e = 9;\n break;\n default:\n break;\n } \n \n nbre += (e * tm);\n tm *= 10;\n } \n \n return nbre;\n}\n\n//Added \n\nboolean isNumberorSemi_colon(char carac) {\n switch(carac) {\n case '0':\n case '1':\n case '2':\n case '3':\n case '4':\n case '5':\n case '6':\n case '7':\n case '8':\n case '9':\n case ';':\n return true;\n default:\n return false;\n } \n \n return false;\n \n}\n\n\n////////////////////////////////// 3. 
LOOP FUNCTION = endless loop = heart of the program //////////////////\n\n// the loop() function runs endlessly in a loop as long as the Arduino is powered\n\nvoid loop(){ // start of the loop() function\n\n// The first lines of code below are additions\nint bt = 0;\nString tmp = \"\";\n\nboolean inkeyboard = false;\n\nif (Serial.available() > 0) {\n \n while(1){\n bt = Serial.read();\n if(isNumberorSemi_colon(char(bt)))\n tmp = tmp + char(bt);\n if(char(bt) == '\\n')\n break;\n \n }\n \n String str_axe_1 = tmp.substring(0, tmp.indexOf(';'));\n String str_axe_2 = tmp.substring(tmp.indexOf(';') + 1, tmp.length());\n \n if(str_axe_1 != \"\")\n positionAxe1 = convert(str_axe_1);\n else\n positionAxe1 = mon_servoTilt.read();\n if(str_axe_2 != \"\")\n positionAxe2 = convert(str_axe_2);\n else\n positionAxe2 = mon_servoPan.read();\n inkeyboard = true;\n}\n\nelse {\n \n positionAxe1=analogRead(axe1Joystick); // ADC reading on the analog pin for axis 1\n\n positionAxe2=analogRead(axe2Joystick); // ADC reading on the analog pin for axis 2\n //----- Joystick position reading\n inkeyboard = false;\n\n \n\n\nif (positionAxe2>700) {\n\n if (angleServoPan <= ANGLE_MAX_PAN)\n angleServoPan=angleServoPan+1; \n\n angleServoPan=constrain(angleServoPan,ANGLE_MIN_PAN,ANGLE_MAX_PAN); // constrain() returns the clamped value\n\n}\n\n\n\nif (positionAxe2<300) {\n\n if (angleServoPan > 0)\n angleServoPan=angleServoPan-1; \n\n angleServoPan=constrain(angleServoPan,ANGLE_MIN_PAN,ANGLE_MAX_PAN); \n\n}\n\n\n\nif (positionAxe1>700) { \n\n if (angleServoTilt <= ANGLE_MAX_TILT)\n angleServoTilt=angleServoTilt+1; \n\n angleServoTilt=constrain(angleServoTilt,ANGLE_MIN_TILT,ANGLE_MAX_TILT); \n\n}\n\n\n\nif (positionAxe1<300) {\n if (angleServoTilt > 0)\n angleServoTilt=angleServoTilt-1;\n\n angleServoTilt=constrain(angleServoTilt,ANGLE_MIN_TILT,ANGLE_MAX_TILT); \n\n}\n\n}\n\n//---- reading of the shield's push-button states\n\nif (digitalRead(bpRouge)==APPUI) { // if the red push button is pressed\n\n\n\n angleServoTilt=90; // resets the Tilt angle\n\n\n\n}\n\n\n\nif (digitalRead(bpBleu)==APPUI) { // if the blue push button is pressed\n\n\n\n angleServoPan=90; // resets the PAN angle \n\n}\n\n\n\nif (digitalRead(bpJoystick)==APPUI) { // if the joystick push button is pressed\n\n\n\n}\n\n\n\n//------------- servomotor position update --- \n\n// Added\n\nif(!inkeyboard) {\n\n\n if (angleServoPan!=angleServoPan0) {\n\n mon_servoPan.writeMicroseconds(anglePan(angleServoPan)); // builds the pulse from the angle value - more precise than write()\n\n angleServoPan0=angleServoPan; // stores the last angle value taken into account\n\n }\n\n\n\n\n\n if (angleServoTilt!=angleServoTilt0) { // if angleServoTilt changed\n\n mon_servoTilt.writeMicroseconds(angleTilt(angleServoTilt)); // builds the pulse from the angle value - more precise than write()\n\n angleServoTilt0=angleServoTilt; // stores the last angle value taken into account\n\n }\n\n\n delay(6);\n\n\n\n // sets the servo movement speed - between 2 analog readings\n\n } else {\n if(positionAxe1 < 0)\n positionAxe1 = 0;\n if(positionAxe1 > 180)\n positionAxe1 = 180;\n if(positionAxe2 < 0)\n positionAxe2 = 0;\n if(positionAxe2 > 180)\n positionAxe2 = 180;\n \n mon_servoTilt.writeMicroseconds(angleTilt(positionAxe1));\n mon_servoPan.writeMicroseconds(anglePan(positionAxe2));\n \n \n }\n// End of addition\n\n\n//while(1); // stop loop\n\n\n} // end of the loop() function - the program loops back to the start of loop() endlessly\n\n// ********************************************************************************\n\n\n\n\n\n// 
////////////////////////// INTERRUPT HANDLING FUNCTIONS //////////////////// \n\n\n\n\n\n// ////////////////////////// OTHER PROGRAM FUNCTIONS //////////////////// \n\n\n\n//------------- servomotor pulse calibration function from an angle value in degrees\n\n//Modified angle function and new angleTilt function added\n\nint anglePan(int val) {\n int impuls = 0;\n impuls= map(val, ANGLE_MIN_PAN, ANGLE_MAX_PAN, POS_MIN_PAN, POS_MAX_PAN);\n return impuls; \n \n}\n\nint angleTilt(int valeur_angle) { \n\n\n\n int impuls=0;\n\n\timpuls=map(valeur_angle,ANGLE_MIN_TILT,ANGLE_MAX_TILT,POS_MIN_TILT, POS_MAX_TILT);\n\n\treturn impuls; \n\n\n\n} // end of the servomotor pulse function\n\n\n\n// ////////////////////////// End of program //////////////////// \n\n" }, { "alpha_fraction": 0.5668431520462036, "alphanum_fraction": 0.6118464469909668, "avg_line_length": 22.06106948852539, "blob_id": "7d1c48fdfd94593e4060f97f477669da99b44cdc", "content_id": "e60dbf8eb554edf3c135fb453767ff14936b373a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3024, "license_type": "no_license", "max_line_length": 84, "num_lines": 131, "path": "/etape1", "repo_name": "Matyu/Wind-River-Project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nFirst stage of the interface\n\"\"\"\n\nimport sys\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nimport serial\nimport time\n\ntm = time.clock()\ntm2 = time.clock()\n\n\nser = serial.Serial('/dev/ttyACM0', 115200)\napp = QtGui.QApplication(sys.argv)\n\nmain_window = QtGui.QWidget()\nmain_window.resize(500, 400)\nmain_window.setWindowTitle(\"GUI to control the Arduino board\")\n\nlab1 = QtGui.QLabel(\"90 D\", main_window)\nlab2 = QtGui.QLabel(\"90 D\", main_window)\n\nlab1.move(300,80)\nlab2.move(130,210)\n\nsld_hb = QtGui.QSlider(QtCore.Qt.Horizontal, main_window)\nsld_dg = QtGui.QSlider(QtCore.Qt.Vertical, main_window)\n\nsld_hb.setGeometry(10, 140, 250, 50)\nsld_dg.setGeometry(265, 15, 50, 250)\n\naxe_x = QtGui.QLabel(main_window)\naxe_y = QtGui.QLabel(main_window)\nsld_hb.setValue(50)\nsld_dg.setValue(50)\n\nreset = QtGui.QPushButton(\"Reset\", main_window)\nreset.move(25, 200)\n\ncmd = QtGui.QLineEdit(main_window)\ncmd.setGeometry(10,300, 300, 40)\n\n\nsend = QtGui.QPushButton(\"Send\", main_window)\nsend.move(330, 310)\n\ndef rst():\n sld_hb.setValue(50)\n sld_dg.setValue(50)\n\ndef sender(): \n string = cmd.text()\n if not ser.isOpen():\n ser.open()\n string = string + \"\\n\"\n cmd.setText(\" \")\n ser.write(string)\n\ndef val_axe__changed(val):\n global tm2\n tm_local = time.clock()\n val = float(val)\n a = val * 180.0 / 100.0\n a = int(a)\n a = 180 - a\n lab2.setText(str(a) + \" D\")\n if tm_local - tm2 > 0.03:\n string = ';' + str(a) + \"\\n\"\n if not ser.isOpen():\n ser.open()\n ser.write(string)\n tm2 = time.clock()\n\ndef val_axe_changed(val):\n global tm\n tm_local = time.clock()\n val = float(val)\n a = val * 180.0 / 100.0\n a = int(a)\n lab1.setText(str(a) + \" D\")\n if tm_local - tm > 0.03:\n string = str(a) + \";\" + \"\\n\"\n if not ser.isOpen():\n ser.open()\n ser.write(string)\n tm = time.clock()\n\ndef val_axe_changed_():\n val = sld_dg.value()\n val = float(val)\n a = val * 180.0 / 100.0\n a = int(a)\n lab1.setText(str(a) + \" D\")\n string = str(a) + ';' + \"\\n\"\n if not ser.isOpen():\n ser.open()\n ser.write(string)\n\ndef val_axe__changed_():\n val = sld_hb.value()\n val = float(val)\n a = val * 180.0 / 
100.0 \n a = int(a)\n a = 180 - a\n lab2.setText(str(a) + \" D\")\n string = ';' + str(a) + \"\\n\"\n if not ser.isOpen():\n ser.open()\n ser.write(string)\n\n \nQtCore.QObject.connect(sld_dg, QtCore.SIGNAL('sliderReleased()'), val_axe_changed_)\nQtCore.QObject.connect(sld_hb, QtCore.SIGNAL('sliderReleased()'), val_axe__changed_)\nQtCore.QObject.connect(sld_dg, QtCore.SIGNAL('valueChanged(int)'), val_axe_changed)\nQtCore.QObject.connect(sld_hb, QtCore.SIGNAL('valueChanged(int)'), val_axe__changed)\nQtCore.QObject.connect(send, QtCore.SIGNAL('clicked()'), sender)\nQtCore.QObject.connect(reset, QtCore.SIGNAL('clicked()'), rst)\n\nmain_window.show()\n\ndef close():\n ser.close()\n return app.exec_()\n\n\nsys.exit(close())\n\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.692307710647583, "avg_line_length": 27.16666603088379, "blob_id": "9b3cf39fddd74e549552f993e57c06c56f0210d6", "content_id": "74acb0ba8b851797b7f0ab910330453a1c71230c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 169, "license_type": "no_license", "max_line_length": 83, "num_lines": 6, "path": "/README.md", "repo_name": "Matyu/Wind-River-Project", "src_encoding": "UTF-8", "text": "Wind-River-Project\n==================\n\nThis repository contains all the source code of my project at Wind River Systems.\n\nVisit my blog: http://matyusblog.unblog.fr/\n" }, { "alpha_fraction": 0.5791757106781006, "alphanum_fraction": 0.6290672421455383, "avg_line_length": 19.361345291137695, "blob_id": "f3c251da359c23d1e7ed6f01c8ad362d76f6b651", "content_id": "2ec9bd162ae2e416ab99f85b2f4a6d1548faa5e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2307, "license_type": "no_license", "max_line_length": 67, "num_lines": 119, "path": "/etape2", "repo_name": "Matyu/Wind-River-Project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSecond version of the interface \n\"\"\"\n\nimport sys\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nimport serial\nimport time\nimport math\n\napp = QtGui.QApplication(sys.argv)\n\nser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)\n\ndef prop(number):\n number = float(number)\n\treturn number / 5.0\n\ndef ret_y(number):\n\treturn 350 - number # !\n\ndef close():\n\tser.close()\n\treturn app.exec_()\n\nclass Interface(QtGui.QWidget):\n\tdef __init__(self):\n\t\tQtGui.QWidget.__init__(self)\n\t\tself.resize(240, 350) # !\n\t\tself.setMaximumSize(240, 350)\n\t\tself.setMinimumSize(240, 350)\n\t\tself.setWindowTitle(\"2D spot control\")\n\n\tdef paintEvent(self, paintEvent):\n\t\tpainter = QtGui.QPainter(self)\n\t\tcolor = QtGui.QColor(56,78,123)\n\t\tcolor.setBlue(145)\n\t\tcolor.setGreen(23)\n\t\tpainter.fillRect(0, 0, 240, 150, color) #!\n\t\tcolor = QtGui.QColor(23,89,45)\n\t\tpainter.fillRect(0, 150, 240, 200, color) #!\n\n\tdef mouseReleaseEvent(self, mouseEvent):\n\t\tAC = -1\n\t\tBA = -1\n\t\tBC = -1\n\t\tangle_horizontal = 0\n\t\tif mouseEvent.x() < 120: # !\n\t\t\tAC = 120 - mouseEvent.x()\t\n\t\telif mouseEvent.x() == 120: #!\n\t\t\tAC = 0\n\t\telse:\n\t\t\tAC = mouseEvent.x() - 120\n\t\tif mouseEvent.x() > 120:\n\t\t\tBC = ret_y(mouseEvent.y())\n\t\t\tBA = BC * BC + AC * AC\n\t\t\tBA = math.sqrt(BA)\n\t\telse:\n\t\t\tBA = ret_y(mouseEvent.y())\n\t\t\tBC = BA * BA + AC * AC\n\t\t\tBC = math.sqrt(BC)\n\t\tAC = prop(AC)\n\t\tBA = prop(BA)\n\t\tBC = prop(BC)\n\t\tif mouseEvent.x() > 120: #!\n\t\t\tangle_horizontal = 
math.acos(AC / BA)\n\t\telse:\n\t\t\tangle_horizontal = math.acos(AC / BC)\n\t\tangle_horizontal = math.degrees(angle_horizontal)\n\n\t\tangle_vertical = 0\n\n\t\tDE = -1\n\t\tEF = -1\n\t\tDF = -1\n\n\n\t\tEF = 350 - mouseEvent.y() # !\n\t\tDE = 160 # !\n\t\tDF = EF * EF + DE * DE\n\t\tDF = math.sqrt(DF)\n\n\t\tDF = prop(DF)\n\t\tEF = prop(EF)\n\t\tDE = prop(DE)\n\t\t\n\n\t\tangle_vertical = math.acos(DE / DF)\n\n\n\n\t\tangle_vertical = math.degrees(angle_vertical)\n\n\t\tif mouseEvent.x() > 120: #!\n\t\t\tangle_horizontal = 180 - angle_horizontal\n\n\t\tangle_horizontal = 180 - angle_horizontal\n\n\n\t\tangle_horizontal = int(angle_horizontal)\n\t\tangle_vertical = int(angle_vertical)\n\t\t\n\t\tangle_vertical += 1\n\t\tangle_horizontal += 1\n\n\n\t\tstring = str(angle_vertical) + ';' + str(angle_horizontal) + \"\\n\"\n\t\tif not ser.isOpen():\n\t\t\tser.open()\n\t\tser.write(string)\n\ninterface = Interface()\ninterface.show()\n\nsys.exit(close()) \n" }, { "alpha_fraction": 0.42200326919555664, "alphanum_fraction": 0.4972632825374603, "avg_line_length": 22.727272033691406, "blob_id": "51a7d18701030f319474c2322b82b07b4569c9c1", "content_id": "99c44697c11ad57e1a4231e09479f80424e021b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3660, "license_type": "no_license", "max_line_length": 180, "num_lines": 154, "path": "/final", "repo_name": "Matyu/Wind-River-Project", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*\n\n\"\"\"\nEtape finale de l'interface\n\"\"\"\n\nimport math\nimport serial\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom PyQt4 import QtGui\nfrom PyQt4.QtOpenGL import *\n\nser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)\n\nHAUTEUR = 90 * 5# Hauteur fenêtre\nLARGEUR = 180 *5# Largeur fenêtre\n\n\n\nclass Widget(QGLWidget):\n '''\n Widget for drawing two spirals.\n '''\n \n def __init__(self, parent):\n QGLWidget.__init__(self, parent)\n self.setMinimumSize(LARGEUR, HAUTEUR)\n\n def paintGL(self):\n '''\n Drawing routine\n '''\n \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glLoadIdentity()\n glColor(0.0, 0.7, 0.0)\n\n glBegin(GL_QUADS)\n glVertex(0.36, 0.30, 0.35)\n glVertex(0.36, 0.90, 0.35)\n glVertex(-0.36, 0.90, 0.35)\n glVertex(-0.36, 0.30, 0.35)\n glEnd() \n\n glColor(0.5, 0.0, 0.0)\n\n glBegin(GL_POLYGON)\n glVertex(0.36, 0.30, 0.35)\n glVertex(-0.36, 0.30, 0.35)\n glColor(1.0, 0.0, 0.0)\n glVertex(-1.0, 0.0, 0.0)\n glVertex(-1.0, -1.0, 0.0)\n glVertex(1.0, -1.0, 0.0)\n glVertex(1.0, 0.0, 0.0)\n glEnd()\n\n glColor(0.0, 0.0, 0.5)\n\n glBegin(GL_QUADS)\n glVertex(0.36, 0.90, 0.35)\n glColor(0.0, 0.0, 1.0)\n glVertex(1.0, 0.70, 0.0)\n glVertex(1.0, 0.0, 0.0)\n glColor(0.0, 0.0, 0.5)\n glVertex(0.36, 0.30, 0.35)\n glEnd()\n\n \n\n glBegin(GL_QUADS)\n glVertex(-0.36, 0.30, 0.35)\n glVertex(-0.36, 0.90, 0.35)\n glColor(0.0, 0.0, 1.0)\n glVertex(-1.0, 0.60, 0.0)\n glVertex(-1.0, 0.0, 0.0)\n glEnd()\n\n glFlush()\n\n def resizeGL(self, w, h):\n '''\n Resize the GL window \n '''\n \n glViewport(0, 0, w, h)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(40.0, 1.0, 1.0, 30.0)\n\n def initializeGL(self):\n '''\n Initialize GL\n '''\n \n # set viewing projection\n glClearColor(0.0, 0.0, 0.0, 1.0)\n glClearDepth(1.0)\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(40.0, 1.0, 1.0, 30.0)\n\n \n def mouseReleaseEvent(self, mouseEvent):\n x = -1\n y = -1\n z = -1\n x, y, z = gluUnProject(mouseEvent.x(), mouseEvent.y(), 0, 
model=glGetDoublev(GL_MODELVIEW_MATRIX), view=glGetIntegerv(GL_VIEWPORT), proj=glGetDoublev(GL_PROJECTION_MATRIX))\n x *= 100\n y *= 100\n if x < 0:\n x = x * -1\n x = 100 - x\n else:\n x += 100\n if y < 0:\n y = y * -1\n y = 100 - y\n else:\n y += 100\n\n x = x * 180 / 200\n y = y * 90 / 200\n print(\"x =>\", str(x), \" -- y =>\", str(y)) \n x = int(x)\n y = int(y)\n x = 180 - x\n y = 90 - y\n string = str(y) + ';' + str(x) + \"\\n\"\n if not ser.isOpen():\n ser.open()\n ser.write(string)\n\n\n# You don't need anything below this\nclass Window(QtGui.QMainWindow):\n ''' Example class for using SpiralWidget'''\n \n def __init__(self):\n QtGui.QMainWindow.__init__(self)\n widget = Widget(self) \n self.setCentralWidget(widget)\n self.resize(LARGEUR, HAUTEUR)\n self.setMinimumSize(LARGEUR, HAUTEUR)\n self.setMaximumSize(LARGEUR, HAUTEUR)\n \nif __name__ == '__main__':\n app = QtGui.QApplication(['Affichage 3D et contrôle spot'])\n window = Window()\n window.show()\n app.exec_()\n ser.close()\n" } ]
5
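The Wind-River-Project files above all drive the Arduino over one plain-text serial protocol: each GUI writes an ASCII line of the form tilt;pan (angles in degrees, newline-terminated) to /dev/ttyACM0 at 115200 baud, and the sketch's anglePan/angleTilt helpers map those angles onto servo pulse widths. Below is a minimal sender for that protocol; the clamping ranges and the send_angles/clamp names are assumptions added for illustration, not part of the repository.

import serial  # pyserial, the same library the PyQt GUIs import

PAN_RANGE = (0, 180)   # assumed limits, standing in for ANGLE_MIN/MAX_PAN
TILT_RANGE = (0, 90)   # assumed limits, standing in for ANGLE_MIN/MAX_TILT

def clamp(value, lo, hi):
    # keep a requested angle inside the servo's mechanical range
    return max(lo, min(hi, value))

def send_angles(port, tilt, pan):
    # write the same 'tilt;pan' ASCII line the GUIs produce
    tilt = clamp(int(tilt), *TILT_RANGE)
    pan = clamp(int(pan), *PAN_RANGE)
    port.write(('%d;%d\n' % (tilt, pan)).encode('ascii'))

if __name__ == '__main__':
    with serial.Serial('/dev/ttyACM0', 115200, timeout=1) as port:
        send_angles(port, tilt=45, pan=90)

Keeping the framing in one host-side helper avoids scattering the ';' separator logic across the three GUIs.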
hazemmm77/ETL
https://github.com/hazemmm77/ETL
6e38de3a023eb5194f9a69dc41f8a2ac2d7af40a
dda2aeeae4409849f7b4ab7c9a1f1c1d5fa8b778
f86029fbddc08f0592cdcf4b585e644668ba2bab
refs/heads/master
2023-05-12T16:59:24.338229
2021-05-28T13:16:32
2021-05-28T13:16:32
371,705,864
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7399598360061646, "alphanum_fraction": 0.7399598360061646, "avg_line_length": 46.47618865966797, "blob_id": "1dceb4b0c14372983a402efc6e52125179fd7289", "content_id": "15549a110cc3095abacd368282ced11669ee6fa1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 996, "license_type": "no_license", "max_line_length": 119, "num_lines": 21, "path": "/README.md", "repo_name": "hazemmm77/ETL", "src_encoding": "UTF-8", "text": "Sparkify wants to analyze the data they've been collecting on songs and user activity on their new music streaming app.\nthe analytics team is particularly interested in understanding what songs users are listening to.\n I create a database schema and ETL pipeline for this analysis to be optimized for queries on song play analysis.\n Using the song and log datasets\n \n Fact Table\n **songplays** - records in log data associated with song plays i.e. records with page NextSong\n _songplay_id, start_time, user_id, level, song_id, artist_id, session_id, location, user_agent_\n\n Dimension Tables\n **users** - users in the app\n _user_id, first_name, last_name, gender, level_\n \n **songs** - songs in music database\n _song_id, title, artist_id, year, duration_\n \n **artists** - artists in music database\n _artist_id, name, location, latitude, longitude_\n \n **time** - timestamps of records in songplays broken down into specific units\n _start_time, hour, day, week, month, year, weekday_" }, { "alpha_fraction": 0.6635711193084717, "alphanum_fraction": 0.6635711193084717, "avg_line_length": 45.47058868408203, "blob_id": "4990793b44afad387c5b71e95b460654083cde44", "content_id": "2438e47260254783ceb2446cfdc8776b3dde837c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2369, "license_type": "no_license", "max_line_length": 145, "num_lines": 51, "path": "/sql_queries.py", "repo_name": "hazemmm77/ETL", "src_encoding": "UTF-8", "text": "# DROP TABLES\n\nsongplay_table_drop = \"DROP TABLE IF EXISTS songplays;\"\nuser_table_drop = \"DROP TABLE IF EXISTS users \"\nsong_table_drop = \"DROP TABLE IF EXISTS songs \"\nartist_table_drop = \"DROP TABLE IF EXISTS artists\"\ntime_table_drop = \"DROP TABLE IF EXISTS time\"\n\n# CREATE TABLES\n\nsongplay_table_create = (\"CREATE TABLE IF NOT EXISTS songplays (songplay_id SERIAL PRIMARY KEY,start_time time,user_id int,level varchar,\\\n song_id varchar,artist_id varchar,session_id varchar,user_agent varchar,location varchar)\")\n\nuser_table_create = (\"CREATE TABLE IF NOT EXISTS users(user_id varchar, first_name varchar,last_name varchar,gender varchar,\\\n level varchar)\")\n\nsong_table_create = (\"CREATE TABLE IF NOT EXISTS songs(song_id varchar,title varchar,artist_id varchar,year int,duration float)\")\n\nartist_table_create = (\"CREATE TABLE IF NOT EXISTS artists(artist_id varchar,name varchar,location varchar,latitude varchar,longitude varchar)\" )\n\ntime_table_create = (\"CREATE TABLE IF NOT EXISTS time (start_time time,hour varchar,day int,week int,month int,year int,\\\n weekday int)\")\n\n# INSERT RECORDS\n\nsongplay_table_insert = (\"INSERT INTO songplays (start_time ,user_id,level,\\\n song_id,artist_id,session_id ,user_agent ,location ) \\\n VALUES (%s, %s, %s,%s,%s,%s,%s,%s)\")\n\nuser_table_insert = (\"INSERT INTO users(user_id, first_name,last_name ,gender ,level )\\\n VALUES (%s, %s, %s,%s,%s)\")\n\nsong_table_insert = (\"INSERT INTO songs (song_id,title,artist_id,year,duration)\\\n VALUES 
(%s, %s, %s,%s,%s)\")\n\nartist_table_insert = (\"INSERT INTO artists(artist_id,name,location ,latitude,longitude)\\\n VALUES (%s, %s, %s,%s,%s)\")\n\n\ntime_table_insert = (\"INSERT INTO time (start_time,hour,day,week,month,year,weekday)\\\n VALUES (%s, %s, %s,%s,%s,%s,%s)\")\n\n# FIND SONGS\n\nsong_select = (\"select s.song_id,s.artist_id from (songs s join artists a ON s.artist_id=a.artist_id)\\\n where s.title=%s and a.name=%s and s.duration=%s\")\n\n# QUERY LISTS\n\ncreate_table_queries = [songplay_table_create, user_table_create, song_table_create, artist_table_create, time_table_create]\ndrop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]" } ]
2
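The ETL record above pairs a star-schema README with parameterized statements in sql_queries.py, but the chunk shown here contains no driver code. As a hedged sketch of how those statements are typically executed together — the psycopg2 connection string and the sample log record are invented for illustration — the songplays fact row is written only after resolving its dimension keys through song_select:

import psycopg2
from sql_queries import song_select, songplay_table_insert

# example log record; every field value here is made up
row = {'ts': '00:01:30', 'userId': 7, 'level': 'free',
       'song': 'Some Song', 'artist': 'Some Artist', 'length': 215.0,
       'sessionId': '42', 'location': 'NY', 'userAgent': 'Mozilla'}

conn = psycopg2.connect('host=127.0.0.1 dbname=sparkifydb user=student password=student')
cur = conn.cursor()

# look up the song/artist dimension keys first
cur.execute(song_select, (row['song'], row['artist'], row['length']))
match = cur.fetchone()
song_id, artist_id = match if match else (None, None)

# then insert the fact row; the tuple order follows the INSERT column list above
cur.execute(songplay_table_insert,
            (row['ts'], row['userId'], row['level'], song_id, artist_id,
             row['sessionId'], row['userAgent'], row['location']))
conn.commit()
conn.close()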
mdenolle/Subevents2019
https://github.com/mdenolle/Subevents2019
0f7c440ec9941f859d6d0071b6350e96c377b405
26c1d125727c52f8ef4f3635b9912b431882b0bd
e4609adfc5040f7d0bc3f82f9a4ce7a9101087f9
refs/heads/master
2020-05-29T21:47:33.747241
2020-03-09T20:13:51
2020-03-09T20:13:51
189,392,775
0
3
null
null
null
null
null
[ { "alpha_fraction": 0.585563063621521, "alphanum_fraction": 0.6521596312522888, "avg_line_length": 39.45478820800781, "blob_id": "854975a64281add9ed30a038f1b063cccc6c3af5", "content_id": "3db22cfcc82fada61a82fdece66b0e63504d3748", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15211, "license_type": "no_license", "max_line_length": 164, "num_lines": 376, "path": "/subevent_GRL_script.py", "repo_name": "mdenolle/Subevents2019", "src_encoding": "UTF-8", "text": "## DISCLAIMER\n\n# This program allows for the peak detection, gaussian fitting and parameters extractions of the subevents, as performed in Danre et al. (2019)\n# Here we take the example of all Source Time Functions provided by SCARDEC (not sorted by focal mechanism, see the link : http://scardec.projects.sismo.ipgp.fr/). \n\n# For information about SCARDEC database, see Vallee et al. 2011 and Vallee et Douet 2016\n# The original version of the code was done by Philippe Danre.\n# The code has been modified by Marine Denolle, last version June 1st 2019 ([email protected])\n##\n## import modules\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys,os, glob\nimport os.path\nimport matplotlib.pyplot as plt\nfrom scipy import stats\npi=np.pi\n\n\n\n\n # define a guaussian function\ndef gaussienne(scale,center,standard,time_vector) :\n '''\n Returns a Gaussian Function, centered on [center] and with a standard deviantion (related to width of the curve) of [std].\n \n Time_vector must be an array\n \n For more :\n \n - Width at half maximum is 2.355*[std]. Provides an useful time scale .\n '''\n return (np.exp(-(time_vector-center)**2/(2*standard*standard))/(standard*np.sqrt(2*pi)))*scale\n\n\n\n\ndt=0.0703 # s, sampling space of SCARDEC\n\n\n# read list of the ASCII files that have the SCARDEC database.\npath='/Users/marinedenolle/Dropbox/SOURCE/SCARDEC/FCTS/*/*moy*'\nquakelist=glob.glob(path) # make a list of filenames\n\n\n# variable initization\nMs = np.zeros(shape=(len(quakelist),40),dtype=np.float64)\nTsub=np.zeros(shape=(len(quakelist),40),dtype=np.float64)\nDsub=np.zeros(shape=(len(quakelist),40),dtype=np.float64)\nMw = np.zeros(len(quakelist))\ndepth = np.zeros(len(quakelist))\ndip = np.zeros(len(quakelist))\nFM = np.zeros(len(quakelist))\nM0 = np.zeros(len(quakelist))\nmo1 = np.zeros(len(quakelist))\nMM0 = np.zeros(len(quakelist))\nTd = np.zeros(len(quakelist))\nerr = np.zeros(len(quakelist))\nMw = np.zeros(len(quakelist))\nNsub = np.zeros(len(quakelist),dtype=np.int)\n\nfor i,filename in enumerate(quakelist): # loop through each quake\n\n # read SCARDEC files\n piplot=np.loadtxt(filename,skiprows=2)\n opened=open(filename)\n opened.readline()\n list=opened.readline().split(' ') # read lines of parameters\n mo1[i]=(float(list[1])) # extract moment\n depth[i]=(float(list[0])) # extract depth\n dip[i]=(float(list[4])) # extract dip\n r1=float(list[5]);r2=float(list[8]) # extract rake\n # use Shearer et al, 2006 to parameterize the focal mechanism type.\n if abs(r1)>90:r1=(180-abs(r1))*(r1/abs(r1))\n if abs(r2)>90:r2=(180-abs(r2))*(r2/abs(r2))\n if abs(r1)<abs(r2):\n FM[i]=r1/90\n else:\n FM[i]=r2/90\n\n # read STF:\n time=np.zeros(len(piplot));rate=np.zeros(len(piplot)) # initialize time and moment-rate vectors\n for ii in range(len(piplot)) : # read each time stamp\n time[ii]=(piplot[ii][0])\n rate[ii]=(piplot[ii][1]) \n # find index of positive time and reliable amplitudes.\n I=np.where( (rate>=0.001*np.max(rate)) & (time>=0)) [0]\n Td[i]=(time[I[-1]]) # 
duration of quake\n M0[i]=(np.trapz(rate[I],x=time[I])) # moment calculated from integrating the STF\n rate=rate/M0[i] # We normalize the STF\n rate0=rate # STF that will not undergo the Gaussian substractions\n\n sub=0 # initially, no peak detected => 0 peaks\n gauss_final=np.zeros(len(rate)) # Final Gaussian-built STF, of the same size as 'rate'\n for el in I : # go through time\n if rate[el-1]<rate[el] and rate[el]>rate[el+1] and rate[el]>(0.1*max(rate0)) and time[el]>0 : # peak detection / default = 0.10 for the min. value of peak\n error0=1e99 # initial error for the grid fit\n std0=0.\n for std in np.linspace(0.01/2.335,300/2.335,700) : # grid fit\n gauss=gaussienne(rate[el],time[el],std,time)\n gauss=gauss*rate[el]/max(gauss) \n error=np.sum((rate[el-5:el+5]-gauss[el-5:el+5])**2)\n if error<error0 :\n std0=std\n error0=error\n gauss0=gauss\n# # Computation of the event's magnitude given the subevent's magnitude\n # if duration is greater than 1s and shorter than entire source duration\n if std0>1/4 and 4*std0< 1.2*Td[i]:\n gauss_final=gauss_final+gauss0 # sum up the subevent gaussian\n rate=rate-gauss0 # make residual\n sub+=1 # increment subevent\n\n Ms[i,sub-1]=np.trapz(gauss0,x=time)*M0[i] # store moment of each subevent\n Tsub[i,sub-1]=time[el] # store time at which it occurs\n Dsub[i,sub-1]=std0*2*np.sqrt(2*np.log(10)) # duration of subevent\n \n MM0[i]=(np.trapz(gauss_final,x=time)*M0[i] ) # recover reconstructed moment \n Nsub[i]=(sub) # store number of subevents for that quake\n err[i]=(MM0[i]/M0[i]) # store error between reconstructed and true moment.\n Mw[i]=(2/3*np.log10(M0[i])-6.07) # store moment magnitude\n\n# store in variable. \nnp.savez('allvar',M0=M0,Ms=Ms,MM0=MM0,Nsub=Nsub,Tsub=Tsub,Dsub=Dsub,Td=Td,FM=FM,depth=depth,err=err,Mw=Mw)\n\n\n# the following section plots figures in the main paper. 
you can stop here.\n# for questions regarding the plotting, contact [email protected] (and check stackoverflow...)\n\n\n\n## READ SCARDEC FIT\ndata=np.load('allvar.npz')\nM0=data['M0'];Ms=data['Ms']\nMM0=data['MM0'];Nsub=data['Nsub'];Nsub=Nsub.astype(np.int)\nTsub=data['Tsub'];Dsub=data['Dsub']\nTd=data['Td'];FM=data['FM']\ndepth=data['depth']\nerr=data['err']\nMw=data['Mw']\n\n## READ SIMULATION FIT (done with test_simulations.py)\ndata=np.load('allvar_simulations_G8.npz')\nM0_sim=data['M0'];Ms_sim=data['Ms']\nMM0_sim=data['MM0'];Nsub_sim=data['Nsub'];Nsub_sub=Nsub_sim.astype(np.int)\nTsub_sim=data['Tsub'];Dsub_sim=data['Dsub']\nTd_sim=data['Td']\n\n# indexes of those that did not rupture beyond the fault.\nfidindez='/Users/marinedenolle/Dropbox/GROUP_PROJECTS/DANRE_DENOLLE_STRIKE_SLIP/good_indx_8.dat'\nID=open(fidindez,'r').readlines()\nindx=np.zeros(len(ID),dtype=np.int)\nfor ii,i1 in enumerate(ID):\n indx[ii]=int(str(i1.split('\\n')[0]))\n\n\n# rearrange the variables to flatten with subevents:\n# #1 simulations\nmmm0_sim=[];mmms_sim=[];mmsub_sim=[]\nfor i in range(len(M0_sim)):\n if len(np.where(i==indx)[0])>=1: # only keep those that fit within the fault.\n for ii in range(Nsub_sim[i]):\n mmm0_sim.append(M0_sim[i])\n mmms_sim.append(Ms_sim[i,ii])\n mmsub_sim.append(Nsub_sim[i])\n \n# #2: scardec\nmmm0=[];mmms=[];mmsub=[];fmm=[];nsuub=[]\nfor i in range(len(quakelist)):\n for ii in range(Nsub[i]):\n mmm0.append(M0[i])\n mmms.append(Ms[i,ii])\n mmsub.append(Nsub[i])\n fmm.append(FM[i])\n nsuub.append(Nsub[i])\n \n\n\n##### FIGURE 2a: PLOT SCARDEC SUBEVENTS ###############\nplt.figure(figsize=(11,4.5))\nII=np.where(Nsub!=0)[0]\nII2=np.where( (FM[II]>=-0.5) & (FM[II]<=0.5)) [0]\nII3=np.where( (FM[II]<-0.5) | (FM[II]>0.5)) [0]\nII21=np.where( (FM[II]>=-0.5) & (FM[II]<=0.5) & (M0[II]>1.68E+19)) [0]\nII22=np.where( (FM[II]>=-0.5) & (FM[II]<=0.5) & (M0[II]<=1.68E+19)) [0]\nII32=np.where( ((FM[II]<-0.5) | (FM[II]>0.5)) & ( (M0[II]>=5E20)&(M0[II]<=2E22)) ) [0]\nslope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(M0[II[II2]]),Nsub[II[II2]]) # all strike slip\nslope0, intercept0, r_value, p_value, std_err = stats.linregress(np.log10(M0[II[II21]]),Nsub[II[II21]]) # all ss greater than 6.5\nslope02, intercept02, r_value, p_value, std_err = stats.linregress(np.log10(M0[II[II22]]),Nsub[II[II22]]) #all ss smaller than 6.5\nslope1, intercept1, r_value1, p_value1, std_err1 = stats.linregress(np.log10(M0[II[II3]]),Nsub[II[II3]]) # all dip slip\nslope12, intercept12, r_value1, p_value1, std_err1 = stats.linregress(np.log10(M0[II[II32]]),Nsub[II[II32]]) # all greater than 7.8 but tohoku\n\n\nMwbin=np.linspace(6,9,4).astype(np.int)\nM0bin=10**((3/2)*(Mwbin+6.07))\nii=np.where(np.asarray(nsuub)>0)[0].astype(np.int)\nii1=np.where(np.asarray(fmm)>=-0.5 )[0]\nii1=np.where( (np.asarray(fmm)[ii]>=-0.5) & (np.asarray(fmm)[ii]<=0.5)) [0]\nii1=ii[ii1]\nii2=np.where( (np.asarray(fmm)[ii]<-0.5) | (np.asarray(fmm)[ii]>0.5)) [0]\nii2=ii[ii2]\nplt.subplot(121)\nplt.scatter(np.asarray(mmm0)[ii2],np.asarray(mmsub)[ii2],edgecolor=None,c='b',alpha=0.3,s=30)\nplt.scatter(np.asarray(mmm0)[ii1],np.asarray(mmsub)[ii1],edgecolor=None,c='r',alpha=0.1,s=24)\nplt.text(3E21,15.5,'slope='+str(round(slope*10)/10),color='r',fontsize=14,rotation=25)\nplt.text(3E21,5.5,'slope='+str(round(slope1*10)/10),color='b',fontsize=14,rotation=10)\nplt.xscale('log')\nplt.grid(True,linewidth=0.25)\nplt.rcParams['axes.axisbelow'] = True\nplt.xlabel('$M_0$ (Nm)',fontsize=14);plt.ylabel('Number of 
subevents',fontsize=14)\nplt.ylim(0,30);plt.xlim(1E17,1E23)\nplt.semilogx(M0,intercept + slope*np.log10(M0),linewidth=3,color='r')\nplt.semilogx(M0,intercept1 + slope1*np.log10(M0),linewidth=3,color='b')\nplt.title('a)',loc='left')\nax2=plt.twiny()\nax2.set_xticks(np.array((np.log10(M0bin)-17)/6))\nax2.set_xticklabels(Mwbin.astype(str))\nax2.set_xlabel('$M_W$')\nax2.xaxis.set_label_coords(1, 1.05)\n\n\n\n#### FIGURE 2b : PLOT SIMULATED SUBEVENTS ###############\nplt.subplot(122)\nplt.scatter(M0_sim,Nsub_sim,edgecolor=None,c='b',alpha=0.5,s=20)\nplt.xscale('log')\nplt.ylim((0,30))\nplt.grid(True,linewidth=0.25)\nplt.title('b)',loc='left')\nplt.yticks([5,10,15,20,25,30])\nplt.xlabel('$M_0$ (Nm/m)',fontsize=14);plt.ylabel('Number of subevents',fontsize=14)\nslope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(mmm0_sim),mmsub_sim)\nplt.semilogx(M0_sim,intercept + slope*np.log10(M0_sim),linewidth=3,color='g')\nplt.text(2E14,6,'slope='+str(round(slope*10)/10),color='g',fontsize=14,rotation=10)\nplt.savefig('Figure2.pdf')\n\n##### FIGURE 3: PLOT MOMENT2MOMENT ###############\nMwbin=np.linspace(6,9,4).astype(np.int)\nM0bin=10**((3/2)*(Mwbin+6.07))\nMwbin1=np.linspace(5,9,18)\nM0bin1=10**((3/2)*(Mwbin1+6.07))\n# find the median\nmedMs=[];medM0=[]\nfor i in range(3,len(Mwbin1)):\n ik=np.where((mmm0<=M0bin1[i]) & (mmm0>M0bin1[i-1]))[0]\n medMs.append(10**(np.median(np.log10(np.asarray(mmms)[ik]))))\n medM0.append(10**(np.mean(np.log10(M0bin1[i-1:i]))))\n\n\nfig=plt.figure(figsize=(11,4.5))\nplt.subplot(121)\n# SCARDEC\nplt.grid(True,linewidth=0.25)\n# plt.axisbelow(True)\nplt.scatter(mmm0,mmms,c='b',edgecolor='k',alpha=0.3)\nplt.xscale('log');plt.yscale('log')\nplt.xlim(1E17,1E23);plt.ylim(1E15,1E23)\nplt.xlabel('$M_0$ (Nm)',fontsize=14)\nplt.ylabel('$M_S$ (Nm)',fontsize=14)\nslope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(mmm0),np.log10(mmms))\n\nprint('regression for scaling with moments')\nprint(slope,intercept,p_value)\nplt.text(1E21,1E16,'slope='+str(round(slope*100)/100),color='g',fontsize=14)\nplt.loglog(mmm0,np.power(10,intercept + slope*np.log10(mmm0)),linewidth=3,color='g')\nplt.title('a) ',loc='left',fontsize=14)\nplt.loglog(np.array([1E17,1E23]),np.array([1E17,1E23]),linewidth=1,color='r')\nplt.loglog(np.array([1E17,1E23]),np.array([1E17,1E23])/10,linewidth=1,color='r')\nplt.loglog(np.array([1E17,1E23]),np.array([1E17,1E23])/100,linewidth=1,color='r')\n\nplt.scatter(medM0,medMs,marker='s',c='orange',edgecolor='k',s=40,zorder=10)\n\nplt.text(1E22,1.5E22,'r = 1',color='r',fontsize=14,rotation=35)\nplt.text(1E22,2E21,'r = 10',color='r',fontsize=14,rotation=35)\nplt.text(1E22,4E20,'r = 100',color='r',fontsize=14,rotation=35)\nax2=plt.twiny()\nax2.set_xticks(np.array((np.log10(M0bin)-17)/6))\nax2.set_xticklabels(Mwbin.astype(str))\nax2.set_xlabel('$M_W$')\nax2.xaxis.set_label_coords(1, 1.05)\n\n\n\nM0bin1=np.logspace(14.5,18,8)\n# find the median\nmedMs=[];medM0=[]\nfor i in range(1,len(M0bin1)):\n ik=np.where((mmm0_sim<=M0bin1[i]) & (mmm0_sim>M0bin1[i-1]))[0]\n medMs.append(10**(np.median(np.log10(np.asarray(mmms_sim)[ik]))))\n medM0.append(10**(np.mean(np.log10(M0bin1[i-1:i]))))\n\n\nplt.subplot(122)\nplt.scatter(mmm0_sim,mmms_sim,edgecolor='k',c='b',alpha=0.5,s=20)\nplt.xscale('log');plt.yscale('log')\nplt.grid(True,linewidth=0.25)\nplt.title('b)',loc='left')\nplt.xlabel('$M_0$ (Nm/m)',fontsize=14);plt.ylabel('$M_S$ (Nm/m)',fontsize=14)\nslope, intercept, r_value, p_value, std_err = 
stats.linregress(np.log10(mmm0_sim),np.log10(mmms_sim))\nprint('regression of moments for simulated')\nprint(slope,intercept)\nplt.text(1E16,1E12,'slope='+str(round(slope*10)/10),color='g',fontsize=14)\nplt.loglog(M0_sim,10**(intercept + slope*np.log10(M0_sim)),linewidth=3,color='g')\nplt.loglog(np.array([1E14,1E17]),np.array([1E14,1E17]),linewidth=1,color='r')\nplt.loglog(np.array([1E14,1E17]),np.array([1E14,1E17])/10,linewidth=1,color='r')\nplt.loglog(np.array([1E14,1E17]),np.array([1E14,1E17])/100,linewidth=1,color='r')\nplt.text(1E14,1E14,'r = 1',color='r',fontsize=14,rotation=30)\nplt.text(1E14,1.5E13,'r = 10',color='r',fontsize=14,rotation=30)\nplt.text(1E14,2E12,'r = 100',color='r',fontsize=14,rotation=30)\nplt.scatter(medM0,medMs,marker='s',c='orange',edgecolor='k',s=40,zorder=10)\nplt.savefig('Figure3.pdf')\n\n\n##### FIGURE S4 ###############\nfig=plt.figure(figsize=(8,10))\nfor i in range(len(quakelist)):\n if Nsub[i]==0:\n continue\n ibig=np.argmax(Ms[i,0:Nsub[i]])\n plt.subplot(311)\n plt.loglog(M0[i],Ms[i,0],'bo',markeredgecolor='black',markeredgewidth=0.2,alpha=0.5)\n plt.rcParams.update({'font.size': 14})\n plt.loglog(M0[i],Ms[i,ibig],'ro',markeredgecolor='black',markeredgewidth=0.2,alpha=0.5)\n plt.grid(True,linewidth=0.25)\n plt.title('a)',loc='left')\n plt.ylabel('$M_s$ (Nm)')\n plt.rcParams.update({'font.size': 14})\n\n\n plt.subplot(312)\n plt.loglog(M0[i],Dsub[i,0],'bo',markeredgecolor='black',markeredgewidth=0.2,alpha=0.5)\n plt.title('b)',loc='left')\n plt.ylabel('$T_S^0$ (s)')\n plt.grid(True,linewidth=0.25)\n plt.rcParams.update({'font.size': 14})\n\n\n plt.subplot(313)\n plt.semilogx(M0[i],Dsub[i,ibig]/Td[i]*100,'ro',markeredgecolor='black',markeredgewidth=0.2,alpha=0.3)\n plt.grid(True,linewidth=0.25)\n plt.ylim(0,100)\n plt.xlabel('$M_0$ (Nm)')\n plt.ylabel('Time of big subevent (s)')\n plt.title('c)',loc='left')\n plt.rcParams.update({'font.size': 14})\nplt.savefig('FigureS4.pdf')\n\n\n########## FIGURE S6 #############\nfig=plt.figure(figsize=(8,10))\nplt.subplot(211)\nplt.hist(np.asarray(err[II]),100)\nplt.title('a)',loc='left')\nplt.rcParams.update({'font.size': 14})\nplt.text(1.25,200,'median '+str(round(np.median(np.asarray(err[II]))*100)/100),fontsize=14)\nplt.text(1.25,100,'std '+str(round(np.std(np.asarray(err[II]))*100)/100),fontsize=14)\nplt.grid(True,linewidth=0.25)\nplt.xlim(0.1,2)\n\n\ndata=np.load('allvar_triangle.npz') # load the triangle variables.\nerr=data['err']\nNsub=data['Nsub']\nII=np.where(Nsub!=0)[0]\nplt.subplot(212)\nplt.hist(np.asarray(err[II]),100)\nplt.title('b)',loc='left')\nplt.rcParams.update({'font.size': 14})\nplt.text(1.25,120,'median '+str(round(np.median(np.asarray(err[II]))*100)/100),fontsize=14)\nplt.text(1.25,80,'std '+str(round(np.std(np.asarray(err[II]))*100)/100),fontsize=14)\nplt.grid(True,linewidth=0.25)\nplt.xlim(0.1,2)\nplt.savefig('FigureS6.pdf')\n\nplt.show()\n" }, { "alpha_fraction": 0.714989423751831, "alphanum_fraction": 0.7536945939064026, "avg_line_length": 32.046512603759766, "blob_id": "154414ddb200f85e84418fd2b59dc290d1a2bbf0", "content_id": "a0253339bf9a3db1a0a2485fa762e2981277f989", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1421, "license_type": "no_license", "max_line_length": 252, "num_lines": 43, "path": "/README.md", "repo_name": "mdenolle/Subevents2019", "src_encoding": "UTF-8", "text": "# Subevents2019\n\n\nScripts that were used in the following publication:\nDanré P, Yin J, Lipovsky BP, Denolle MA. 
Earthquakes within earthquakes: Patterns in rupture complexity. Geophysical Research Letters. 2019 Jul 16;46(13):7352-60.\n\nThis directory contains python scripts to perform a subevent decomposition of source time functions using Gaussian kernels. It contains resampled USGS data set of 180 STFs (as published in Hayes 2017), 500 STFs simulated using SBIEM from Pablo Ampuero.\nThe directory also contains matlab scripts used to run SBIEM in the context of the publication.\n\n\n# requirements.\nnumpy,matplotlib,sys,os,glob,scipy\n\n# List of scripts\n\nsubevent_scardec.py: main script for Danre et al, 2019. Includes plotting for the figures in the paper.\n\ntest_triangle.py: copy of main script but with triangle kernels\n\ntest_usgs.py: same as in main but for USGS database.\n\ntest_simulations.py same as in main for for simulated STFs.\n\n\n# list of STF files:\nUSGS_STF/ * . newstf: all USGS STFs sampled at dt=0.0730s, 180 of them (1990-2017)\n\nSIM_STF/ * .dat : all simulated STFs sampled at dt=0.0730s\n\n\n# list of variables\nallvar.npz: results out of main \n\nallvar_triangle.npz: results out of test_triangle.py \n\nallvar_usgs.npz: results out of test_usgs.py \n\nallvar_sim.npz: results out of test_simulations.py\n\n\n\n\nContact marine for any question: mdenolle(AT)fas(DOT)harvard(DOT)edu.\n" }, { "alpha_fraction": 0.5643213391304016, "alphanum_fraction": 0.6065045595169067, "avg_line_length": 35.20359420776367, "blob_id": "c7f93a8bb03b0d6d667dc37c67af549deab9f356", "content_id": "e17f74428cceea1193efc78ba9b83e45b50eced1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6211, "license_type": "no_license", "max_line_length": 163, "num_lines": 167, "path": "/test_simulations.py", "repo_name": "mdenolle/Subevents2019", "src_encoding": "UTF-8", "text": "## Packages required :\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys,os, glob\r\nimport os.path\r\nfrom scipy import stats\r\nimport matplotlib\r\nfrom matplotlib import ticker\r\npi=np.pi\r\n\r\n\r\ndef gaussienne(scale,center,standard,time_vector) :\r\n '''\r\n Returns a Gaussian Function, centered on [center] and with a standard deviantion (related to width of the curve) of [std].\r\n \r\n Time_vector must be an array\r\n \r\n For more :\r\n \r\n - Width at mi-max is 2.355*[std].\r\n '''\r\n # print('Width at mi-max = ',2.355*standard)\r\n return (np.exp(-(time_vector-center)**2/(2*standard*standard))/(standard*np.sqrt(2*pi)))*scale\r\n\r\n\r\ndt=0.0703 # s, sampling space of SCARDEC\r\n\r\npath='./SIM_STF/*.dat'\r\nquakelist=glob.glob(path) # make a list of filenames\r\n\r\n\r\n# variable initization\r\nMs = np.zeros(shape=(len(quakelist),40),dtype=np.float64)\r\nTsub=np.zeros(shape=(len(quakelist),40),dtype=np.float64)\r\nDsub=np.zeros(shape=(len(quakelist),40),dtype=np.float64)\r\nMw = np.zeros(len(quakelist))\r\nM0 = np.zeros(len(quakelist))\r\nmo1 = np.zeros(len(quakelist))\r\nMM0 = np.zeros(len(quakelist))\r\nTd = np.zeros(len(quakelist))\r\nerr = np.zeros(len(quakelist))\r\nMw = np.zeros(len(quakelist))\r\nNsub = np.zeros(len(quakelist),dtype=np.int)\r\n\r\n\r\n\r\nfor i,filename in enumerate(quakelist):\r\n print(filename)\r\n piplot=np.loadtxt(filename,skiprows=1)\r\n opened=open(filename)\r\n opened.readline()\r\n # read STF:\r\n time=np.zeros(len(piplot));rate=np.zeros(len(piplot)) # initialize time and moment-rate vectors\r\n for ii in range(len(piplot)) : # read each time stamp\r\n time[ii]=(piplot[ii][0])\r\n rate[ii]=(piplot[ii][1]) 
\r\n # find index of positive time and reliable amplitudes.\r\n I=np.where( (rate>=0.001*np.max(rate)) & (time>=0)) [0]\r\n Td[i]=(time[I[-1]]) # duration of quake\r\n M0[i]=(np.trapz(rate[I],x=time[I])) # moment calculated from integrating the STF\r\n rate=rate/M0[i] # We normalize the STF\r\n rate0=rate # STF that will not undergo the Gaussian substractions\r\n\r\n sub=0 # initially, no peak detected => 0 peaks\r\n gauss_final=np.zeros(len(rate)) # Final Gaussian-built STF, of the same size as 'rate'\r\n for el in I : # go through time\r\n if rate[el-1]<rate[el] and rate[el]>rate[el+1] and rate[el]>(0.1*max(rate0)) and time[el]>0 : # peak detection / default = 0.10 for the min. value of peak\r\n error0=1e99 # initial error for the grid fit\r\n std0=0.\r\n for std in np.linspace(0.01/2.335,300/2.335,700) : # grid fit\r\n gauss=gaussienne(rate[el],time[el],std,time)\r\n gauss=gauss*rate[el]/max(gauss) \r\n error=np.sum((rate[el-5:el+5]-gauss[el-5:el+5])**2)\r\n if error<error0 :\r\n std0=std\r\n error0=error\r\n gauss0=gauss\r\n# # Computation of the event's magnitude given the subevent's magnitude\r\n # if duration is greater than 1s and shorter than entire source duration\r\n if std0>1/4 and 4*std0< 1.2*Td[i]:\r\n gauss_final=gauss_final+gauss0 # sum up the subevent gaussian\r\n rate=rate-gauss0 # make residual\r\n sub+=1 # increment subevent\r\n\r\n Ms[i,sub-1]=np.trapz(gauss0,x=time)*M0[i] # store moment of each subevent\r\n Tsub[i,sub-1]=time[el] # store time at which it occurs\r\n Dsub[i,sub-1]=std0*2*np.sqrt(2*np.log(10)) # duration of subevent\r\n \r\n MM0[i]=(np.trapz(gauss_final,x=time)*M0[i] ) # recover reconstructed moment \r\n Nsub[i]=(sub) # store number of subevents for that quake\r\n err[i]=(MM0[i]/M0[i]) # store error between reconstructed and true moment.\r\n Mw[i]=(2/3*np.log10(M0[i])-6.07) # store moment magnitude\r\n print(Mw[i])\r\n# store in variable. 
\r\nnp.savez('allvar_simulations',M0=M0,Ms=Ms,MM0=MM0,Nsub=Nsub,Tsub=Tsub,Dsub=Dsub,Td=Td,err=err,Mw=Mw)\r\n\r\n\r\ndata=np.load('allvar_simulations.npz')\r\nM0=data['M0'];Ms=data['Ms']\r\nMM0=data['MM0'];Nsub=data['Nsub'];Nsub=Nsub.astype(np.int)\r\nTsub=data['Tsub'];Dsub=data['Dsub']\r\nTd=data['Td'];#FM=data['FM']\r\n# depth=data['depth']\r\nerr=data['err']\r\nMw=data['Mw']\r\n\r\n\r\nplt.semilogx(M0,err,'o')\r\nplt.show()\r\n\r\n# #2: scardec\r\nmmm0=[];mmms=[];mmsub=[];fmm=[];nsuub=[]\r\nfor i in range(len(Nsub)):\r\n for ii in range(np.minimum(Nsub[i],len(Ms[i,:]))):\r\n mmm0.append(M0[i])\r\n mmms.append(Ms[i,ii])\r\n nsuub.append(Nsub[i])\r\n\r\n##### FIGURE S5 ###############\r\nplt.figure(figsize=(8,11))\r\nII=np.where(Nsub!=0)[0]\r\nprint(len(II),len(quakelist))\r\nMwbin=np.linspace(6,9,4).astype(np.int)\r\nM0bin=10**(3/2*np.linspace(6,9,4)+9.1)\r\nii=np.where(np.asarray(nsuub)>0)[0].astype(np.int)\r\nplt.subplot(211)\r\nplt.scatter(np.asarray(mmm0)[ii],np.asarray(nsuub)[ii],edgecolor=None,c='b',alpha=0.3,s=30)\r\nplt.xscale('log')\r\nplt.grid(True,linewidth=0.25)\r\nplt.rcParams['axes.axisbelow'] = True\r\nplt.xlabel('$M_0$ (Nm)',fontsize=14);plt.ylabel('Number of subevents',fontsize=14)\r\nplt.ylim(0,30);#plt.xlim(1E17,1E23)\r\nplt.title('a)',loc='left')\r\nax2=plt.twiny()\r\nax2.set_xticks(np.array((np.log10(M0bin)-17)/6))\r\nax2.set_xticklabels(Mwbin.astype(str))\r\nax2.set_xlabel('$M_W$')\r\nax2.xaxis.set_label_coords(1, 1.05)\r\n\r\n\r\n##### FIGURE 5b: PLOT MOMENT2MOMENT ###############\r\nM0bin1=10**(3/2*np.linspace(6,9,9)+9.1)\r\n# find the median\r\nmedMs=[];medM0=[]\r\nfor i in range(len(M0bin1)):\r\n ik=np.where((mmm0<=M0bin1[i]) & (mmm0>M0bin1[i-1]))[0]\r\n medMs.append(10**(np.median(np.log10(np.asarray(mmms)[ik]))))\r\n medM0.append(10**(np.mean(np.log10(M0bin1[i-1:i]))))\r\n\r\n\r\nplt.subplot(212)\r\n# SCARDEC\r\nplt.grid(True,linewidth=0.25)\r\nplt.scatter(mmm0,mmms,c='b',edgecolor='k',alpha=0.3,s=2)\r\nplt.xscale('log');plt.yscale('log')\r\nplt.xlabel('$M_0$ (Nm)',fontsize=14)\r\nplt.ylabel('$M_S$ (Nm)',fontsize=14)\r\nplt.title('b) ',loc='left',fontsize=14)\r\n\r\nax2=plt.twiny()\r\nax2.set_xticks(np.array((np.log10(M0bin)-17)/6))\r\nax2.set_xticklabels(Mwbin.astype(str))\r\nax2.set_xlabel('$M_W$')\r\nax2.xaxis.set_label_coords(1, 1.05)\r\n\r\nplt.show()" }, { "alpha_fraction": 0.5738412141799927, "alphanum_fraction": 0.6286982297897339, "avg_line_length": 37.97044372558594, "blob_id": "35323a6747dbfcf478dbbff981246c62d933e008", "content_id": "f8671b411de5288ee29b061f93dfb3e1c98d22f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8112, "license_type": "no_license", "max_line_length": 163, "num_lines": 203, "path": "/test_usgs.py", "repo_name": "mdenolle/Subevents2019", "src_encoding": "UTF-8", "text": "## DISCLAIMER\r\n\r\n# This program allows for the peak detection, gaussian fitting and parameters extractions of the subevents, as performed in Danre et al. 
(2019)\r\n# Here we take the example of all Source Time Functions provided by Hayes (2017)\r\n# The original version of the code was done by Philippe Danre.\r\n# The code has been modified by Marine Denolle, last version June 1st 2019 ([email protected])\r\n# this is the exact same code as in subevent_GRL_script.py except that we do not look into the focal mechanism information and focus on the USGS database.\r\n##\r\n## import modules\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sys,os, glob\r\nimport os.path\r\nfrom scipy import stats\r\nimport matplotlib\r\nfrom matplotlib import ticker\r\npi=np.pi\r\n\r\n\r\ndef gaussienne(scale,center,standard,time_vector) :\r\n '''\r\n Returns a Gaussian Function, centered on [center] and with a standard deviantion (related to width of the curve) of [std].\r\n \r\n Time_vector must be an array\r\n \r\n For more :\r\n \r\n - Width at mi-max is 2.355*[std].\r\n '''\r\n # print('Width at mi-max = ',2.355*standard)\r\n return (np.exp(-(time_vector-center)**2/(2*standard*standard))/(standard*np.sqrt(2*pi)))*scale\r\n\r\n\r\ndt=0.0703 # s, sampling space of SCARDEC\r\n\r\npath='./USGS_STFS/*.newstf'\r\nquakelist=glob.glob(path)\r\nM0=[];depth=[];FM=[];MM0=[];Nsub=[];dip=[];Td=[];mo1=[];err=[]\r\nMw=[]\r\n# variable initization\r\nMs = np.zeros(shape=(len(quakelist),40),dtype=np.float64)\r\nTsub=np.zeros(shape=(len(quakelist),40),dtype=np.float64)\r\nDsub=np.zeros(shape=(len(quakelist),40),dtype=np.float64)\r\nMw = np.zeros(len(quakelist))\r\ndepth = np.zeros(len(quakelist))\r\ndip = np.zeros(len(quakelist))\r\nFM = np.zeros(len(quakelist))\r\nM0 = np.zeros(len(quakelist))\r\nmo1 = np.zeros(len(quakelist))\r\nMM0 = np.zeros(len(quakelist))\r\nTd = np.zeros(len(quakelist))\r\nerr = np.zeros(len(quakelist))\r\nMw = np.zeros(len(quakelist))\r\nNsub = np.zeros(len(quakelist),dtype=np.int)\r\nfor i,filename in enumerate(quakelist):\r\n \r\n print(i,filename)\r\n piplot=np.loadtxt(filename,skiprows=1)\r\n opened=open(filename)\r\n opened.readline()\r\n\r\n\r\n # read STF:\r\n time=np.zeros(len(piplot));rate=np.zeros(len(piplot)) # initialize time and moment-rate vectors\r\n for ii in range(len(piplot)) : # read each time stamp\r\n time[ii]=(piplot[ii][0])\r\n rate[ii]=(piplot[ii][1]) \r\n # find index of positive time and reliable amplitudes.\r\n I=np.where( (rate>=0.001*np.max(rate)) & (time>=0)) [0]\r\n I = I[0:-1]\r\n Td[i]=(time[I[-1]]) # duration of quake\r\n M0[i]=(np.trapz(rate[I],x=time[I])) # moment calculated from integrating the STF\r\n rate=rate/M0[i] # We normalize the STF\r\n rate0=rate # STF that will not undergo the Gaussian substractions\r\n\r\n sub=0 # initially, no peak detected => 0 peaks\r\n gauss_final=np.zeros(len(rate)) # Final Gaussian-built STF, of the same size as 'rate'\r\n for el in I : # go through time\r\n if rate[el-1]<rate[el] and rate[el]>rate[el+1] and rate[el]>(0.1*max(rate0)) and time[el]>0 : # peak detection / default = 0.10 for the min. 
value of peak\r\n error0=1e99 # initial error for the grid fit\r\n std0=0.;gauss=0\r\n for std in np.linspace(0.01/2.335,300/2.335,700) : # grid fit\r\n gauss=gaussienne(rate[el],time[el],std,time)\r\n gauss=gauss*rate[el]/max(gauss) \r\n error=np.sum((rate[el-5:el+5]-gauss[el-5:el+5])**2)\r\n if error<error0 :\r\n std0=std\r\n error0=error\r\n gauss0=gauss\r\n# # Computation of the event's magnitude given the subevent's magnitude\r\n # if duration is greater than 1s and shorter than entire source duration\r\n if std0>1/4 and 4*std0< 1.2*Td[i]:\r\n gauss_final=gauss_final+gauss0 # sum up the subevent gaussian\r\n rate=rate-gauss0 # make residual\r\n sub+=1 # increment subevent\r\n\r\n Ms[i,sub-1]=np.trapz(gauss0,x=time)*M0[i] # store moment of each subevent\r\n Tsub[i,sub-1]=time[el] # store time at which it occurs\r\n Dsub[i,sub-1]=std0*2*np.sqrt(2*np.log(10)) # duration of subevent\r\n \r\n\r\n # one of the events is weird:\r\n if (2/3*np.log10(M0[i])-6.07 <= 7) and (sub>10):\r\n continue\r\n MM0[i]=(np.trapz(gauss_final,x=time)*M0[-1] ) # recover reconstructed moment \r\n Nsub[i]=(sub) # store number of subevents for that quake\r\n err[i]=(MM0[i]/M0[i]) # store error between reconstructed and true moment.\r\n Mw[i]=(2/3*np.log10(M0[i])-6.07) \r\nnp.savez('allvar_USGS',M0=M0,Ms=Ms,MM0=MM0,Nsub=Nsub,Tsub=Tsub,Dsub=Dsub,Td=Td,FM=FM,depth=depth,err=err,Mw=Mw)\r\n\r\n\r\n\r\n#exit()\r\ndata=np.load('allvar_USGS.npz')\r\nM0=data['M0'];Ms=data['Ms']\r\nMM0=data['MM0'];Nsub=data['Nsub']\r\nTsub=data['Tsub'];Dsub=data['Dsub']\r\nTd=data['Td'];FM=data['FM']\r\ndepth=data['depth']\r\nerr=data['err']\r\nMw=data['Mw']\r\n\r\n\r\nmmm0=[];mmms=[];mmsub=[];fmm=[];nsuub=[]\r\nfor i in range(len(quakelist)):\r\n for ii in range(Nsub[i]):\r\n mmm0.append(M0[i])\r\n mmms.append(Ms[i,ii])\r\n mmsub.append(Nsub[i])\r\n fmm.append(FM[i])\r\n nsuub.append(Nsub[i])\r\n\r\n\r\n\r\n####### FIGURE S1 ##################\r\nplt.figure(figsize=(8,11))\r\nII=np.where(Nsub!=0)[0]\r\nslope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(M0[II]),Nsub[II])\r\nprint(slope,intercept,r_value)\r\nMwbin=np.linspace(6,9,4).astype(np.int)\r\nM0bin=10**((3/2)*(Mwbin+6.07))\r\nii=np.where(np.asarray(nsuub)>0)[0].astype(np.int)\r\nplt.subplot(211)\r\nplt.scatter(np.asarray(mmm0)[ii],np.asarray(mmsub)[ii],edgecolor=None,c='b',alpha=0.3,s=30)\r\nplt.text(1E18,4,'slope='+str(round(slope*10)/10),color='r',fontsize=14,rotation=10)\r\nplt.xscale('log')\r\nplt.grid(True,linewidth=0.25)\r\nplt.rcParams['axes.axisbelow'] = True\r\nplt.xlabel('$M_0$ (Nm)',fontsize=14);plt.ylabel('Number of subevents',fontsize=14)\r\nplt.ylim(0,40);plt.xlim(1E17,1E23)\r\nplt.semilogx(M0,intercept + slope*np.log10(M0),linewidth=3,color='r')\r\nplt.title('a)',loc='left')\r\nax2=plt.twiny()\r\nax2.set_xticks(np.array((np.log10(M0bin)-17)/6))\r\nax2.set_xticklabels(Mwbin.astype(str))\r\nax2.set_xlabel('$M_W$')\r\nax2.xaxis.set_label_coords(1, 1.05)\r\n\r\n\r\nMwbin=np.linspace(6,9,4).astype(np.int)\r\nM0bin=10**((3/2)*(Mwbin+6.07))\r\nMwbin1=np.linspace(5,9,18)\r\nM0bin1=10**((3/2)*(Mwbin1+6.07))\r\n# find the median\r\nmedMs=[];medM0=[]\r\nfor i in range(3,len(Mwbin1)):\r\n ik=np.where((mmm0<=M0bin1[i]) & (mmm0>M0bin1[i-1]))[0]\r\n medMs.append(10**(np.median(np.log10(np.asarray(mmms)[ik]))))\r\n medM0.append(10**(np.mean(np.log10(M0bin1[i-1:i]))))\r\n\r\n\r\nplt.subplot(212)\r\nplt.scatter(mmm0,mmms,c='b',edgecolor='k',alpha=0.3)\r\nplt.xscale('log');plt.yscale('log')\r\nplt.xlim(1E17,1E23);plt.ylim(1E15,1E23)\r\nplt.xlabel('$M_0$ 
(Nm)',fontsize=14)\r\nplt.ylabel('$M_S$ (Nm)',fontsize=14)\r\nplt.grid(True,linewidth=0.25)\r\nslope, intercept, r_value, p_value, std_err = stats.linregress(np.log10(mmm0),np.log10(mmms))\r\n\r\nprint('regression for scaling with moments')\r\nprint(slope,intercept,r_value)\r\nplt.text(1E21,1E16,'slope='+str(round(slope*100)/100),color='g',fontsize=14)\r\nplt.loglog(mmm0,np.power(10,intercept + slope*np.log10(mmm0)),linewidth=3,color='g')\r\nplt.title('b) ',loc='left',fontsize=14)\r\nplt.loglog(np.array([1E17,1E23]),np.array([1E17,1E23]),linewidth=1,color='r')\r\nplt.loglog(np.array([1E17,1E23]),np.array([1E17,1E23])/10,linewidth=1,color='r')\r\nplt.loglog(np.array([1E17,1E23]),np.array([1E17,1E23])/100,linewidth=1,color='r')\r\n\r\nplt.scatter(medM0,medMs,marker='s',c='orange',edgecolor='k',s=40,zorder=10)\r\n\r\nplt.text(1E18,7E18,'r = 1',color='r',fontsize=14,rotation=25)\r\nplt.text(1E18,5E17,'r = 10',color='r',fontsize=14,rotation=25)\r\nplt.text(1E18,7E16,'r = 100',color='r',fontsize=14,rotation=25)\r\nax2=plt.twiny()\r\nax2.set_xticks(np.array((np.log10(M0bin)-17)/6))\r\nax2.set_xticklabels(Mwbin.astype(str))\r\nax2.set_xlabel('$M_W$')\r\nax2.xaxis.set_label_coords(1, 1.05)\r\n\r\nplt.savefig('FigureS1.pdf')\r\nplt.show()" } ]
4
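The Subevents2019 scripts above all share one decomposition loop: detect local maxima of the normalized source time function above 10% of its peak, grid-search a Gaussian width at each maximum, subtract the best-fitting kernel, and integrate it to get the subevent moment. A condensed, self-contained version of that loop on a synthetic two-subevent STF — every amplitude and timing below is invented — looks like this:

import numpy as np

def gaussian(center, std, t):
    # unit-area Gaussian kernel, matching the gaussienne() helper above
    return np.exp(-(t - center) ** 2 / (2 * std * std)) / (std * np.sqrt(2 * np.pi))

t = np.arange(0, 60, 0.0703)  # the SCARDEC sampling step used by the scripts
stf = 1e19 * gaussian(12, 3, t) + 4e18 * gaussian(30, 2, t)  # synthetic STF

# peak detection: local maxima above 10% of the global maximum
peaks = [i for i in range(1, len(t) - 1)
         if stf[i - 1] < stf[i] > stf[i + 1] and stf[i] > 0.1 * stf.max()]

residual = stf.copy()
for i in peaks:
    best_g, best_err = None, np.inf
    for s in np.linspace(0.1, 300 / 2.335, 700):   # same width grid as the scripts
        g = gaussian(t[i], s, t)
        g *= residual[i] / g.max()                 # pin the kernel to the peak height
        err = np.sum((residual[i - 5:i + 5] - g[i - 5:i + 5]) ** 2)
        if err < best_err:
            best_g, best_err = g, err
    print('subevent at t=%.1f s, moment=%.2e Nm' % (t[i], np.trapz(best_g, t)))
    residual = residual - best_g

Run on this input it recovers two subevents with moments close to 1e19 and 4e18 Nm, which is the same sanity check the real scripts make through the reconstructed-to-true moment ratio err = MM0/M0.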
leiladanesh/assignment7
https://github.com/leiladanesh/assignment7
4b5c67441c2d4b27af37af6a74700ef45a22e86e
fe6405b80c525bb1e7f6f8115512f3d9d058ce46
7ac238311e5b93bbe25cec3d9350b531b22ee1a7
refs/heads/main
2023-07-16T13:46:27.837011
2021-08-21T11:06:17
2021-08-21T11:06:17
398,535,953
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.4030206799507141, "alphanum_fraction": 0.4069952368736267, "avg_line_length": 37.13131332397461, "blob_id": "e7a43a8d2923b8776be80f85c74f90db1d4c033e", "content_id": "2b92588d3fe49cf854537baca4140a30261ced37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3774, "license_type": "no_license", "max_line_length": 99, "num_lines": 99, "path": "/translat.py", "repo_name": "leiladanesh/assignment7", "src_encoding": "UTF-8", "text": "from typing import Dict\nWORDS = []\n\ndef show_menu():\n print('1- add new word ')\n print('2- translation english to persian ')\n print('3- translation persian to english ')\n print('4- exit ')\n\ndef add_word():\n\n f=0\n word_english = input('Please enter english word: ')\n for i in range(len(WORDS)):\n if WORDS[i]['english'] == word_english :\n print('This word is exist in translate file.')\n f=0\n else:\n word_persian = input('Please enter persian word : ')\n dict = {}\n dict['english'] = word_english\n dict['persian'] = word_persian\n WORDS.append(dict)\n write_to_file()\n break\n if f == 0:\n break\n\ndef file_to_list():\n\n try:\n file = open('translate.txt' , 'r')\n my_words = file.read().split('\\n')\n for i in range(len(my_words)):\n \n if i % 2 == 0:\n dict = {}\n dict['english'] = my_words[i]\n else:\n dict['persian'] = my_words[i]\n WORDS.append(dict)\n except:\n print('Cant find file. ') \n\n\ndef write_to_file():\n new_word = ''\n for i in range(len(WORDS)):\n english = WORDS[i]['english']\n persian = WORDS[i]['persian']\n new_word ='\\n'+ english + '\\n' + persian\n file = open('translate.txt' , 'a')\n myfile = file.write(new_word) \n\ndef translate_english_to_persian():\n\n sentences = input('Please enter your sentence in english: ')\n translate_to_persian = ''\n sentence = sentences.split('.')\n for i in range(len(sentence)):\n word = sentence[i].split(' ')\n for z in range(len(word)):\n for j in range(len(WORDS)):\n if WORDS[j]['english'] == word[z]:\n if z == len(word)-1:\n translate_to_persian += WORDS[j]['persian'] + '.'\n else:\n translate_to_persian += WORDS[j]['persian'] + ' ' \n print('Translate: ' , translate_to_persian) \n\ndef translate_persian_to_english():\n sentences = input('Please enter your sentence in persien: ')\n translate_to_english = ''\n sentence = sentences.split('.')\n for i in range(len(sentence)):\n word = sentence[i].split(' ')\n for z in range(len(word)):\n for j in range(len(WORDS)):\n if WORDS[j]['persian'] == word[z]:\n if z == len(word)-1:\n translate_to_english += WORDS[j]['english'] + '.'\n else:\n translate_to_english += WORDS[j]['english'] + ' ' \n print('Translate: ' , translate_to_english)\n\nwhile True:\n show_menu()\n file_to_list()\n choice = int(input('Please choose an option: '))\n if choice == 1:\n add_word()\n elif choice == 2:\n translate_english_to_persian()\n elif choice == 3:\n translate_persian_to_english()\n elif choice == 4:\n exit()\n else:\n print('Wrong choice! Try again. 
')" }, { "alpha_fraction": 0.7542372941970825, "alphanum_fraction": 0.7711864113807678, "avg_line_length": 28.75, "blob_id": "9ed80110934899b7972326ed7448264b9d14fb35", "content_id": "02d39f493952017c30526bfc5edca22e8a849c47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/convert mp4.py", "repo_name": "leiladanesh/assignment7", "src_encoding": "UTF-8", "text": "from moviepy import editor\n\nvideo = editor.VideoFileClip(' marzieh.mp4 ')\nvideo.audio.write_audiofile(' marzieh .mp3')" } ]
2
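translat.py above persists its dictionary as alternating english/persian lines in translate.txt. A hedged sketch of that round trip using a plain dict — the load_pairs/save_pair names and the sample entry are illustrative, not taken from the repository — would be:

def load_pairs(path):
    # read the alternating english/persian line format translat.py writes
    pairs = {}
    try:
        with open(path, encoding='utf-8') as f:
            lines = [line for line in f.read().split('\n') if line]
        for english, persian in zip(lines[0::2], lines[1::2]):
            pairs[english] = persian
    except OSError:
        pass  # no dictionary file yet
    return pairs

def save_pair(path, english, persian):
    # append one pair in the same two-line layout
    with open(path, 'a', encoding='utf-8') as f:
        f.write(english + '\n' + persian + '\n')

save_pair('translate.txt', 'book', 'ketab')
print(load_pairs('translate.txt').get('book'))  # -> ketab

Filtering empty strings on load keeps the even/odd pairing intact whether the file separates entries with leading or trailing newlines.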
shade9795/Cursos
https://github.com/shade9795/Cursos
3c0c923d3b51af0fd2c426405875daf04669a19b
90239fc655122ace2be5b48ce2806a385e6c8ee5
176757f4287edcd682ab059c8b60dd8e22b982e2
refs/heads/main
2023-02-17T21:42:10.871068
2021-01-15T06:19:38
2021-01-15T06:19:38
329,777,866
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6287878751754761, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 25.5, "blob_id": "b83c506bf45cbc00c8a9be369d9f139853891908", "content_id": "86769d1ef39d4cb1be29d3b7f0fffda1d4731cd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 58, "num_lines": 10, "path": "/python/ejercicio31.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\ncantidad=0\nn=int(input(\"ingrese la cantidad de piezas a procesar: \"))\nwhile x<=n:\n largo=float(input(\"largo de la pieza: \"))\n if largo>=1.20 and largo<=1.30:\n cantidad=cantidad+1\n x=x+1\nprint(\"lacantidad de piezas actas es de:\")\nprint(cantidad)" }, { "alpha_fraction": 0.5793103575706482, "alphanum_fraction": 0.5931034684181213, "avg_line_length": 17.25, "blob_id": "8f123fa07ca2d014ae829fe124bd750e07af53c2", "content_id": "a430659c79988fc97792c49dfbada3def2793e00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 30, "num_lines": 8, "path": "/python/problemas/Condiciones compuestas con operadores lógicos/problema1.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "dia=int(input(\"Dia: \"))\nmes=int(input(\"Mes: \"))\naño=int(input(\"Año: \"))\n\nif mes==12:\n print(\"Es navidad\")\nelse:\n print(\"Aun no es navidad\")" }, { "alpha_fraction": 0.5560747385025024, "alphanum_fraction": 0.5981308221817017, "avg_line_length": 14.357142448425293, "blob_id": "bc6485912c52dc596de522f927406b0e5745530f", "content_id": "0e1ac640d26309193bc6b3a86ae4e482ae5b90f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 214, "license_type": "no_license", "max_line_length": 29, "num_lines": 14, "path": "/python/problemas/Estructura repetitiva while/problema1.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\nmayor=0\nmenor=0\nwhile x<=10:\n nota=int(input(\"nota: \"))\n if nota>=7:\n mayor=mayor+1\n else:\n menor=menor+1\n x=x+1\nprint(\"Notas mayores\")\nprint(mayor)\nprint(\"Notas menores\")\nprint(menor)" }, { "alpha_fraction": 0.6183574795722961, "alphanum_fraction": 0.6521739363670349, "avg_line_length": 24.875, "blob_id": "b1dd6172498f540b51722600f016a701c8c95d7b", "content_id": "c7890df29d7acd9f145a9ef1f5b55be3db2ea45c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 48, "num_lines": 16, "path": "/python/problemas/Estructuras condicionales simples y compuestas/problema1.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"ingrese num1: \"))\nnum2=int(input(\"ingrese num2: \"))\nif num1>num2:\n suma=num1+num2\n print(\"la suma de los valores es:\")\n print(suma)\n resta=num1-num2\n print(\"la resta de los valores es:\")\n print(resta)\nelse:\n multi=num1*num2\n print(\"la multiplicaion de los valores es:\")\n print(multi)\n divicion=num1/num2\n print(\"la divicion de los valores es:\")\n print(divicion)\n" }, { "alpha_fraction": 0.5254902243614197, "alphanum_fraction": 0.5607843399047852, "avg_line_length": 22.272727966308594, "blob_id": "5af9931029bef4345ffc1528219484cd363fbcfa", "content_id": "7665891c5ebfb614ab6d2ff6ad0e3a64db4499cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 257, "license_type": "no_license", "max_line_length": 53, "num_lines": 11, "path": "/python/problemas/Condiciones compuestas con operadores lógicos/problema5.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=int(input(\"Cordenada X: \"))\ny=int(input(\"Cordenada Y: \"))\n\nif x==0 or y==0:\n print(\"Las coordenadas deben ser diferentes a 0\")\nelse:\n if x>0 and y>0:\n print(\"1º Cuadrante\")\n else:\n if x<0 and y>0:\n print(\"2º Cuadrante\")" }, { "alpha_fraction": 0.6172839403152466, "alphanum_fraction": 0.6502057909965515, "avg_line_length": 29.375, "blob_id": "9590066a1d571028508ef080f956be188b871017", "content_id": "43c394f36d2b29bd5f34bd3341980ebe65e3db41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 243, "license_type": "no_license", "max_line_length": 65, "num_lines": 8, "path": "/python/problemas/Estructuras condicionales simples y compuestas/problema3.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num=int(input(\"Ingrese un numero del 1 al 99: \"))\nif num<10:\n print(\"su numero tiene una cifras\")\nelse:\n if num<100:\n print(\"su numero tiene dos cifras\")\n else:\n print(\"el numero ingresado supera la medida establecida\")\n" }, { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.811965823173523, "avg_line_length": 22.399999618530273, "blob_id": "d34e90e5df235e4ae0ce8528acaec5f662e2de8c", "content_id": "690f2d8ebb28cc4a364511f9bd82c6b77526365f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/python/problemas/Estructura de programación secuencial/problema4.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "htrabajadas=120\nPporhora=2\nmensualidad=htrabajadas*Pporhora\nprint(\"el sueldo de este mes es de:\")\nprint(mensualidad)\n" }, { "alpha_fraction": 0.5912408828735352, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 18.571428298950195, "blob_id": "4fcfe12db265ef29658f1aa531eeecf1867da442", "content_id": "bcb99d64d75157b0da9e6290ca4bf89594d6c76a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/python/ejercicio10.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"numero 1:\"))\nnum2=int(input(\"numero 2:\"))\nprint(\"El valor mayor es\")\nif num1>num2:\n print(num1)\nelse:\n print(num2)\n" }, { "alpha_fraction": 0.49640288949012756, "alphanum_fraction": 0.528777003288269, "avg_line_length": 21.5, "blob_id": "60fd860a0a555f60535342a616be02ee768b2ea6", "content_id": "404ac977d6f10ac74dcede3e6758a09d0f55975e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/python/problemas/Estructuras condicionales anidadas/problema3.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num=int(input(\"ingrese un numero de hasta tres cifras: \"))\n\nif num>1000:\n print(\"cifra exedida del limite\")\nelse:\n if num>=100:\n print(\"tres cifras\")\n else:\n if num>=10:\n print(\"dos cifras\")\n else:\n print(\"una cifra\")\n " }, { "alpha_fraction": 0.48373982310295105, "alphanum_fraction": 0.5487805008888245, "avg_line_length": 16.64285659790039, 
"blob_id": "aa10e67a2d817e08e6b9cbffaedccd67236ed969", "content_id": "200338442fd05f25e8d444fe659e19384b6a6607", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 29, "num_lines": 14, "path": "/python/problemas/Estructuras condicionales anidadas/problema1.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"numero 1: \"))\nnum2=int(input(\"numero 2: \"))\nnum3=int(input(\"numero 3: \"))\n\nif num1>num2:\n if num1>num3:\n print(num1)\n else:\n print(num3)\nelse:\n if num2>num3:\n print(num2)\n else:\n print(num3)" }, { "alpha_fraction": 0.6305732727050781, "alphanum_fraction": 0.662420392036438, "avg_line_length": 16.55555534362793, "blob_id": "52c89015c7a93a9d1a6c358f5eac36cfe21b7868", "content_id": "42665681b70c4d154bdebc0bb1587810cd752b4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 35, "num_lines": 9, "path": "/python/problemas/Estructura repetitiva while/problema2.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\nsuma=0\nwhile x<=5:\n altura=float(input(\"Altura: \"))\n suma=suma+altura\n x=x+1\npromedio=suma/5\nprint(\"El promedio de altura es: \")\nprint(promedio)" }, { "alpha_fraction": 0.5858123302459717, "alphanum_fraction": 0.6361556053161621, "avg_line_length": 24.764705657958984, "blob_id": "5ba4d39f274e8ddd40497506f665fabe105154f7", "content_id": "7eae0a3fcf232b89ad99d9a229e0ce5d629a58a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 43, "num_lines": 17, "path": "/python/problemas/Condiciones compuestas con operadores lógicos/problema6.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "sueldo=int(input(\"Sueldo: \"))\nantiguedad=int(input(\"antiguedad: \"))\n\nif sueldo<500 and antiguedad>=10:\n porcent=sueldo*20/100\n newsuel=sueldo+porcent\n print(\"aumento del 20%\")\n print(newsuel)\nelse:\n if sueldo<500 and antiguedad<10:\n porcent=sueldo*5/100\n newsuel=sueldo+porcent\n print(\"aumento del 5%\")\n print(newsuel)\n else:\n print(\"Sueldo a pagar sin aumento\")\n print(sueldo)" }, { "alpha_fraction": 0.6464088559150696, "alphanum_fraction": 0.6629834175109863, "avg_line_length": 29.33333396911621, "blob_id": "bdbc33109f40ea9f6d0845fcb039467bbcc09d92", "content_id": "a43a55cc6c3ab86ad4c4187348dc9d83e644b09a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 44, "num_lines": 6, "path": "/python/ejercicio20.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "dia=int(input(\"Ingrese el dia: \"))\nmes=int(input(\"Ingrese el mes: \"))\naño=int(input(\"Ingrese el año: \"))\n\nif mes==1 or mes==2 or mes==3:\n print(\"Corresponde al primer trimestre\")" }, { "alpha_fraction": 0.6289592981338501, "alphanum_fraction": 0.6877828240394592, "avg_line_length": 21.100000381469727, "blob_id": "f5c0f9b6e884fb5fba156515174f02b4cc8a5df1", "content_id": "03a4dc0b52b88653d82b0ffc837a89a228d84ffb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 26, "num_lines": 10, "path": "/python/problemas/Estructura de programación 
secuencial/problema3.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"num1: \"))\nnum2=int(input(\"num2: \"))\nnum3=int(input(\"num3: \"))\nnum4=int(input(\"num4: \"))\nsuma=num1+num2+num3+num4\npromedio=suma/4\nprint(\"la usma total es:\")\nprint(suma)\nprint(\"el promedio\")\nprint(promedio)\n" }, { "alpha_fraction": 0.5347043871879578, "alphanum_fraction": 0.5912596583366394, "avg_line_length": 15.208333015441895, "blob_id": "320c0663124b4ef8c8eec7f76b1ba172ff0c2202", "content_id": "0767bece2ba5473139e6c702609e6a0a2b0bcc35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 389, "license_type": "no_license", "max_line_length": 32, "num_lines": 24, "path": "/python/problemas/Estructura repetitiva while/problema6.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\ntotal1=0\ntotal2=0\n\nprint(\"Primera lista\")\nwhile x<=5:\n valor=int(input(\"valor: \"))\n total1=total1+valor\n x=x+1\n \nx=1\nprint(\"Segunda lista\")\nwhile x<=5:\n valor1=int(input(\"valor: \"))\n total2=total2+valor1\n x=x+1\n \nif total1==total2:\n print(\"Listas iguales\")\nelse:\n if total1>total2:\n print(\"Lista 1 Mayor\")\n else:\n print(\"lista 2 Mayor\")\n" }, { "alpha_fraction": 0.6012658476829529, "alphanum_fraction": 0.6708860993385315, "avg_line_length": 21.571428298950195, "blob_id": "107efdc179442eb04eee3e0745f09ef0aba8ac2f", "content_id": "c7354ee3ec3843ba0786413aa9b46054fe51f8cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 158, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/python/problemas/Estructuras condicionales simples y compuestas/problema2.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "nota1=int(input(\"nota1: \"))\nnota2=int(input(\"nota2: \"))\nnota3=int(input(\"nota3: \"))\nsuma= nota1+nota2+nota3\nprom=suma/3\nif prom>=7:\n print(\"promocionado\")\n" }, { "alpha_fraction": 0.516339898109436, "alphanum_fraction": 0.529411792755127, "avg_line_length": 17.625, "blob_id": "c7d72b29177e6ec22bd6526896e9aba133e11df8", "content_id": "62fb8f13020cbcf3b7b682f598d40393d784b969", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/python/problemas/Estructuras condicionales anidadas/problema2.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num=int(input(\"ingrese un numero: \"))\nif num==0:\n print(\"nulo\")\nelse:\n if num>0:\n print(\"Positivo\")\n else:\n print(\"Negativo\")\n " }, { "alpha_fraction": 0.5570175647735596, "alphanum_fraction": 0.5964912176132202, "avg_line_length": 14.266666412353516, "blob_id": "ae0b3e5caf7b71770493fe5b337bb70f171f4ff9", "content_id": "0239df8ea575dc1f4afb0bd4a8539d66f345b4fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/python/problemas/Estructura repetitiva while/problema7.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\npares=0\nimpares=0\nwhile x<=6:\n valor=int(input(\"ingrese un valor: \"))\n if valor%2==0:\n pares=pares+1\n else:\n impares=impares+1\n x=x+1\n\nprint(\"pares: \")\nprint(pares)\nprint(\"impares: \")\nprint(impares)" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 
0.6783625483512878, "avg_line_length": 27.66666603088379, "blob_id": "4a966a0ac45ebb7d8ee8cd367bef9bfa9a72a4e6", "content_id": "5de191f7553afdf760504fa6bc72f6f117081885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 171, "license_type": "no_license", "max_line_length": 47, "num_lines": 6, "path": "/python/problemas/Condiciones compuestas con operadores lógicos/problema2.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"numero1: \"))\nnum2=int(input(\"numero2: \"))\nnum3=int(input(\"numero3: \"))\n\nif num1<10 and num2<10 and num3<10:\n print(\"Todos los numeros son menores a 10\")" }, { "alpha_fraction": 0.640816330909729, "alphanum_fraction": 0.6897959113121033, "avg_line_length": 24.5, "blob_id": "ddc4112ca99f04ff02838546bd3543b03e0cf3a1", "content_id": "e65bf1aeac7ceaeafdd0a43abd4af94528ae6f99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 245, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/python/problemas/Estructura de programación secuencial/problema2.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"num 1: \"))\nnum2=int(input(\"num 2: \"))\nnum3=int(input(\"num 3: \"))\nnum4=int(input(\"num 4: \"))\nsuma=num1+num2\nproducto=num3*num4\nprint(\"el resultado de la suma es:\")\nprint(suma)\nprint(\"el resultado del producto es\")\nprint(producto)\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6240000128746033, "avg_line_length": 24, "blob_id": "69996530b5f91c515b40178e8a548f6b867100e3", "content_id": "4ddc2664b245e73efcd37e31c077966572cd1c7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 69, "num_lines": 15, "path": "/python/problemas/Estructuras condicionales anidadas/problema4.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "preguntas=int(input(\"Ingrese la cantidad de preguntas: \"))\ncorrectas=int(input(\"Ingrese la cantidad de respuestas correctas: \"))\n\nporcen=correctas*100/preguntas\n\nif porcen>=90:\n print(\"Nivel Maximo\")\nelse:\n if porcen>=75:\n print(\"Nivel Medio\")\n else:\n if porcen>=50:\n print(\"Nivel Regular\")\n else:\n print(\"Fuera de nivel\")\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6177605986595154, "avg_line_length": 19, "blob_id": "ebe60422b296dcb7cafc6c8c1589bc2f59719482", "content_id": "7f06daf5fc3a94160371536da0d92f4c80962723", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 36, "num_lines": 13, "path": "/python/ejercicio14.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "nota1=int(input(\"ingrese nota 1: \"))\nnota2=int(input(\"ingrese nota 2: \"))\nnota3=int(input(\"ingrese nota 3: \"))\n\nprom=(nota1+nota2+nota3)/3\n\nif prom>=7:\n print(\"Promocionado\")\nelse:\n if prom>=4:\n print(\"regular\")\n else:\n print(\"Reprobado\")" }, { "alpha_fraction": 0.758169949054718, "alphanum_fraction": 0.758169949054718, "avg_line_length": 29.600000381469727, "blob_id": "f18161063141ca4c5ca0f71ab54e491f93c17acf", "content_id": "c6f8ea40c7076cf7fcd22ae94bad91502918ddf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 153, "license_type": "no_license", 
"max_line_length": 40, "num_lines": 5, "path": "/python/ejercicio4.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "precio=int(input(\"Ingrese el precio: \"))\ncantidad=int(input(\"ingrese cantidad\"))\nimporte=precio*cantidad\nprint(\"el importe total es de:\")\nprint(importe)\n" }, { "alpha_fraction": 0.5872092843055725, "alphanum_fraction": 0.6744186282157898, "avg_line_length": 27.83333396911621, "blob_id": "d0eec1821931397e4504f8deb8c956676ae2c7f2", "content_id": "3da454dac4f5a140b6ac7259340c96f00609b25a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 50, "num_lines": 6, "path": "/python/problemas/Condiciones compuestas con operadores lógicos/problema3.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"numero1: \"))\nnum2=int(input(\"numero2: \"))\nnum3=int(input(\"numero3: \"))\n\nif num1<10 or num2<10 or num3<10:\n print(\"Alguno de los numeros es menor a diez\")" }, { "alpha_fraction": 0.5780346989631653, "alphanum_fraction": 0.6531791687011719, "avg_line_length": 20.75, "blob_id": "f5fb7aca450d3d8f08a176727a2ac4a041945f49", "content_id": "2d6d672f2be1b2c9ced8f638e13cca3bf7c3d6e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 173, "license_type": "no_license", "max_line_length": 29, "num_lines": 8, "path": "/python/problemas/Condiciones compuestas con operadores lógicos/problema4.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"numero1: \"))\nnum2=int(input(\"numero2: \"))\nnum3=int(input(\"numero3: \"))\n\nif num1==num2 and num2==num3:\n suma=num1+num2\n multi=suma*num3\n print(multi)" }, { "alpha_fraction": 0.5405405163764954, "alphanum_fraction": 0.5675675868988037, "avg_line_length": 14, "blob_id": "ac0f76943ebe35117054824463d9751ae3acd31b", "content_id": "6a57f6e39321710d138c6760c48b7d67fdc083f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 74, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/python/ejercicio29.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "n=int(input(\"Ingrese un numero: \"))\nx=1\nwhile x<=n:\n print(x)\n x=x+1" }, { "alpha_fraction": 0.7345132827758789, "alphanum_fraction": 0.7433628439903259, "avg_line_length": 27.25, "blob_id": "01a2365eeadd30ddf4ecf878f537b74c372a443d", "content_id": "c7a80cb978ab8bab7b18d8f3877eba10953e44b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 41, "num_lines": 4, "path": "/python/problemas/Estructura de programación secuencial/problema1.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "lado=int(input(\"ingrese el lado: \"))\nperimetro=lado*4\nprint(\"El perimetro del cuadrado es de:\")\nprint(perimetro)\n" }, { "alpha_fraction": 0.3272727131843567, "alphanum_fraction": 0.4727272689342499, "avg_line_length": 8.333333015441895, "blob_id": "9fc88074fe89b8cc54f92fadc63e77c892aa3b11", "content_id": "5f3c1290e6e81aadb48a2ecf3b419fbe532068c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 12, "num_lines": 6, "path": "/python/problemas/Estructura repetitiva while/problema4.py", "repo_name": 
"shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\ny=11\nwhile x<=25:\n print(y)\n y=y+11\n x=x+1" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 13, "blob_id": "df5a490b4dc53b5ecbb7a4efeea238878b1e10d0", "content_id": "bcb416fc9274ceac0c60b188653fc454c93d6d03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/python/problemas/Estructura repetitiva while/problema5.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "mult=8\nwhile mult<=500:\n print(mult)\n mult=mult+8" }, { "alpha_fraction": 0.7122641801834106, "alphanum_fraction": 0.7405660152435303, "avg_line_length": 25.5, "blob_id": "3a60ea024ff6f0cd2a45efba6d2fd0c9df851332", "content_id": "36f05fa1d03bf911e38cf229266a29571e85617a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 42, "num_lines": 8, "path": "/python/ejercicio3.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "num1=int(input(\"ingrese un numero:\"))\nnum2=int(input(\"ingrese segundo numero:\"))\nsuma=num1+num2\nproducto=num1*num2\nprint(\"la suma da un total de:\")\nprint(suma)\nprint(\"el producto da un total de\")\nprint(producto)\n" }, { "alpha_fraction": 0.6182669997215271, "alphanum_fraction": 0.688524603843689, "avg_line_length": 18.454545974731445, "blob_id": "6ee7a3cb0a8b5b907c1d3c6e73d748724c60a04a", "content_id": "30830a74f9dbd5b344256e72e36315c7af7aab9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "no_license", "max_line_length": 58, "num_lines": 22, "path": "/python/problemas/Estructura repetitiva while/problema3.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\nimporte=0\nconta1=0\nconta2=0\nempleados=int(input(\"Ingrese la cantidad de empleados: \"))\nwhile x<=empleados:\n sueldo=int(input(\"sueldo: \"))\n\n if sueldo>=100 and sueldo<=300:\n conta1=conta1+1\n else:\n conta2=conta2+1\n\n importe=importe+sueldo\n x=x+1\n\nprint(\"sueldos entre 100 y 300\")\nprint(conta1)\nprint(\"sueldos mayores a 300\")\nprint(conta2)\nprint(\"importe total gastado en sueldos\")\nprint(importe)" }, { "alpha_fraction": 0.6229507923126221, "alphanum_fraction": 0.6612021923065186, "avg_line_length": 14.333333015441895, "blob_id": "d9919c883e6571f2387b33f0e06bc3ec1dc59edd", "content_id": "bc4a3d264bf977b71d739fe345be6f8549132598", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 42, "num_lines": 12, "path": "/python/ejercicio30.py", "repo_name": "shade9795/Cursos", "src_encoding": "UTF-8", "text": "x=1\nsuma=0\nwhile x<=10:\n valor=int(input(\"ingresa un valor: \"))\n suma=suma+valor\n x=x+1\n\npromedio=suma//10\nprint(\"suma total:\")\nprint(suma)\nprint(\"promedio:\")\nprint(promedio)" } ]
32
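The Cursos exercises above pick the largest of three numbers with nested conditionals; below is a minimal sketch of that same decision written as a function (pick_largest is a hypothetical name, not from the repo), checked against Python's built-in max:

# A sketch of the max-of-three logic from problema1.py, assuming three
# already-parsed integers. pick_largest is an illustrative name only.
def pick_largest(num1, num2, num3):
    if num1 > num2:
        return num1 if num1 > num3 else num3
    return num2 if num2 > num3 else num3

assert pick_largest(3, 9, 5) == 9 == max(3, 9, 5)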
vnespinoza/FinalProject
https://github.com/vnespinoza/FinalProject
5686be74be3564f37c4050795dbb34ee2d523a37
d46b8e84022aa87645467824e1fad340b2bed815
ead36bdc3fc42a477c670ef92b905e536dc4d502
refs/heads/master
2021-01-21T18:11:40.763918
2017-05-22T07:46:07
2017-05-22T07:46:07
92,024,158
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7297297120094299, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 23.66666603088379, "blob_id": "bbea23f15c6ff077c7666e3083b6e33fa7c831c3", "content_id": "f19cc4d313823927f967d9e45ed3086538598640", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 41, "num_lines": 3, "path": "/README.md", "repo_name": "vnespinoza/FinalProject", "src_encoding": "UTF-8", "text": "# FinalProject\nIs it English or is it not English? <br/>\nVeronica & Diana\n" }, { "alpha_fraction": 0.6926895380020142, "alphanum_fraction": 0.7041215300559998, "avg_line_length": 40.54999923706055, "blob_id": "37377c7b0504a29d36101808a6b958f15a8e3daa", "content_id": "4f0ca9cf926f71ddc03539c37944ae9fd54f1998", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6648, "license_type": "no_license", "max_line_length": 161, "num_lines": 160, "path": "/fourgram.py", "repo_name": "vnespinoza/FinalProject", "src_encoding": "UTF-8", "text": "# Naive bayes classifier for English language identification\n\n# 1. Separate words into two groups -- English (0) and not-English (1).\n# 2. Count within each group.\n# 3. Adjust the counts by applying add-one smoothing. Add the UNKNOWN bigram.\n# 4. Change the adjusted counts to probabilities.\n# 5. Write a function that uses the probabilities to classify a word. To avoid underflow errors, add log-probabilities rather than multiplying the probabilities.\n# 6. Apply the function to the test data.\n# 7. Calculate precision and recall:\n# - precision = number of machine said English and data said English\n# -------------------------------------------\n# number of machine said English\n# - recall = number of machine said English and data said English\n# -------------------------------------------\n# number of data said English\n\nimport re\n\nenglish_bigrams = {} # e.g. english_bigrams['th'] = 10 <-- 'th' is found 10 times in the English words\nnotenglish_bigrams = {}\nenglish_count = 0 # start the counts at 0 (these counts are for the number of English and not-English words)\nnotenglish_count = 0\n\n# 1. Separate words into two groups.\n\nfor line in open('words.train'):\n\t#splitted = line.split() # split line by white-space\n\tlabel = line[0] # the first element is the label (0 for English and 1 for not-English)\n\tletters = line[2:].strip() # the rest are letters (the letters in each word)\n\tletters = letters, '###'\n\tletters = ''.join(letters)\n\tmoreletters = '#', letters\n\tmoreletters = ''.join(moreletters)\n\tmoremoreletters = '##', letters\n\tmoremoreletters = ''.join(moremoreletters)\n\tmoremoremoreletters = '###', letters\n\tmoremoremoreletters = ''.join(moremoremoreletters)\n\n\tif label == '0':\n\t\tenglish_count += 1 # if the label is 0, increment the frequency for English words\n\telse:\n\t\tnotenglish_count += 1\n\n# 2. 
Identify and count character 4-grams within each group (the code reuses the name \"bigram\" for them).\n\n\tpattern = '....'\n\tbigrams = re.findall(pattern, letters)\n\tmorebigrams = re.findall(pattern, moreletters)\n\tmoremorebigrams = re.findall(pattern, moremoreletters)\n\tmoremoremorebigrams = re.findall(pattern, moremoremoreletters)\n\tallbigrams = bigrams + morebigrams + moremorebigrams + moremoremorebigrams\n\n\tfor bigram in allbigrams:\n\t\tif label == '0':\n\t\t# increment frequency in english_bigrams\n\t\t\tif bigram in english_bigrams:\n\t\t\t\tenglish_bigrams[bigram] += 1 # if the n-gram is already in the dictionary\n\t\t\telse: \n\t\t\t\tenglish_bigrams[bigram] = 1 # if the n-gram isn't in the dictionary, add it and assign the count to 1\n\t\telse:\n\t\t# increment frequency in notenglish_bigrams\n\t\t\tif bigram in notenglish_bigrams:\n\t\t\t\tnotenglish_bigrams[bigram] += 1\n\t\t\telse:\n\t\t\t\tnotenglish_bigrams[bigram] = 1\n\n# 3. Smooth.\n\nenglish_bigrams['<<>>'] = 0 # add a dummy unknown n-gram called '<<>>'\nfor bigram in english_bigrams:\n\tenglish_bigrams[bigram] += 1 # apply smoothing by adding 1 to each frequency\nnotenglish_bigrams['<<>>'] = 0\nfor bigram in notenglish_bigrams:\n\tnotenglish_bigrams[bigram] += 1\n\n# 4. To probabilities.\nenglish_total = float(sum(english_bigrams.values())) # total number of n-grams in the English words, after application of smoothing and addition of dummy n-gram\nfor bigram in english_bigrams:\n\tenglish_bigrams[bigram] /= english_total # P(bigram|English)\n\nnotenglish_total = float(sum(notenglish_bigrams.values()))\nfor bigram in notenglish_bigrams:\n\tnotenglish_bigrams[bigram] /= notenglish_total # P(bigram|not-English)\n\nenglish_prior = english_count / float(english_count+notenglish_count) # P(English) # probability that a word is English\nnotenglish_prior = notenglish_count / float(english_count+notenglish_count) # P(not English) # probability that a word is not English\n\n# 5. The classifier function\ndef classify(input_bigrams, english_prior, notenglish_prior, english_bigrams, notenglish_bigrams):\n\timport math\n\t# 1. Calculate the English score.\n\tenglish_score = math.log(english_prior)\n\tfor bigram in input_bigrams:\n\t\tenglish_score += math.log(english_bigrams.get(bigram, english_bigrams['<<>>'])) # look up the word's n-grams and calculate the English score (probability)\n\t# 2. Calculate the not-English score.\n\tnotenglish_score = math.log(notenglish_prior)\n\tfor bigram in input_bigrams:\n\t\tnotenglish_score += math.log(notenglish_bigrams.get(bigram, notenglish_bigrams['<<>>']))\n\t# 3. 
Compare the two scores to classify.\n\tif english_score >= notenglish_score: # this is how the program decides whether an input word should be classified as English (0) or not-English (1)\n\t\treturn '0'\n\telse:\n\t\treturn '1'\n\n#both_said_english = 0\n#we_said_english = 0\n#data_said_english = 0\ntp = 0\nfn = 0\nfp = 0\ntn = 0\ntest_file = open('words2.test')\nfor line in test_file:\n\t#ll = line.split()\n\tanswer = line[0]\n\tinput_letters = line[2:].strip()\n\tinput_letters = input_letters, '###'\n\tinput_letters = ''.join(input_letters)\n\tmore_input_letters = '#', input_letters\n\tmore_input_letters = ''.join(more_input_letters)\n\tmore_more_input_letters = '##', input_letters\n\tmore_more_input_letters = ''.join(more_more_input_letters)\n\tmore_more_more_input_letters = '###', input_letters\n\tmore_more_more_input_letters = ''.join(more_more_more_input_letters)\n\n\tpattern = '....'\n\tinput_bigrams = re.findall(pattern, input_letters)\n\tmore_input_bigrams = re.findall(pattern, more_input_letters)\n\tmore_more_input_bigrams = re.findall(pattern, more_more_input_letters)\n\tmore_more_more_input_bigrams = re.findall(pattern, more_more_more_input_letters)\n\tall_input_bigrams = input_bigrams + more_input_bigrams + more_more_input_bigrams + more_more_more_input_bigrams\n\t\n\tprediction = classify(all_input_bigrams, english_prior, notenglish_prior, english_bigrams, notenglish_bigrams)\n#\tprint '# data: ', line.strip() # what the data said\n#\tprint '# prediction: ', prediction # what the program predicts\n\t#if answer == '0': data_said_english += 1 # increment the counts\n\t#if prediction == '0': we_said_english += 1\n\t#if answer == '0' and prediction == '0': both_said_english += 1\n\tif answer == '0' and prediction =='0' : tp += 1\n\tif answer == '1' and prediction == '0': fp += 1 \n\tif answer == '0' and prediction =='1' : fn += 1\n\tif answer == '1' and prediction == '1' : tn += 1\ntest_file.close()\n\n'''print 'data', data_said_english\nprint 'we', we_said_english\nprint 'both', both_said_english\n\nprecision = float(both_said_english) / we_said_english\nrecall = float(both_said_english) / data_said_english'''\n\naccuracy = float(tp + tn) / (tp + fp + tn +fn)\nprecision = float(tp)/(tp + fp)\nrecall = float(tp)/ (tp + fn)\nfscore = float(2 * precision * recall)/(precision + recall) # F1 is the harmonic mean, so it needs the factor of 2\n\nprint 'accuracy = ', accuracy\nprint 'precision = ', precision\nprint 'recall = ', recall\nprint 'f-score = ',fscore\n" } ]
2
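fourgram.py above classifies words by summing log-probabilities of add-one-smoothed character n-gram counts; here is a self-contained sketch of that scoring step (the probability table and prior are invented toy values, and the '<<>>' fallback key mirrors the file):

import math

# Toy smoothed probabilities; '<<>>' is the unknown-n-gram fallback,
# matching the convention used in fourgram.py. Values are made up.
english = {'#the': 0.4, 'the#': 0.3, '<<>>': 0.1}
prior_english = 0.5

def log_score(ngrams, probs, prior):
    # Summing log-probabilities avoids underflow from multiplying many
    # small numbers, which is the same trick classify() uses.
    score = math.log(prior)
    for g in ngrams:
        score += math.log(probs.get(g, probs['<<>>']))
    return score

print(log_score(['#the', 'xyzq'], english, prior_english))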
santiycr/good-news-bad-news
https://github.com/santiycr/good-news-bad-news
e1106407f11c2a062d25060592e6c12b99aac08d
d8dba4b72e1e7cca4c33aae47e89cc96a4e52d00
117ac5682c499bb64a70823cc69da3fc207c363b
refs/heads/master
2021-01-19T04:27:22.619819
2016-06-26T03:15:29
2016-06-26T03:15:29
61,970,480
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6663089990615845, "alphanum_fraction": 0.6727467775344849, "avg_line_length": 33.51234436035156, "blob_id": "cc8b5b95891e7c809ce7faa37f5d822c7bc48070", "content_id": "d9f0727bedb2c8c9b8ddd4c0a28bdf102c79b9c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5592, "license_type": "no_license", "max_line_length": 270, "num_lines": 162, "path": "/alexa-lambda/index.js", "repo_name": "santiycr/good-news-bad-news", "src_encoding": "UTF-8", "text": "\"use strict\";\n/**\n Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Licensed under the Apache License, Version 2.0 (the \"License\"). You may not use this file except in compliance with the License. A copy of the License is located at\n\n http://aws.amazon.com/apache2.0/\n\n or in the \"license\" file accompanying this file. This file is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n*/\n\n/**\n *\n * Examples:\n * One-shot model:\n * User: \"Alexa, ask Good News Bad News for good news\"\n * Alexa: \"Here are today's good news: ...\"\n */\n\n/**\n * App ID for the skill\n */\nvar APP_ID = \"amzn1.echo-sdk-ams.app.4f540de1-1471-413d-8e12-2dfd2dcdaa1b\",\n NEWS_TYPE = {GOOD: 1, BAD: 2},\n GOOD_NEWS_SOURCE = \"https://s3.amazonaws.com/good-news-bad-news/good-news.json\",\n BAD_NEWS_SOURCE = \"https://s3.amazonaws.com/good-news-bad-news/bad-news.json\";\n\nvar AlexaSkill = require('./AlexaSkill');\nvar https = require('https');\n\n/**\n * SpaceGeek is a child of AlexaSkill.\n * To read more about inheritance in JavaScript, see the link below.\n *\n * @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Introduction_to_Object-Oriented_JavaScript#Inheritance\n */\nvar News = function () {\n AlexaSkill.call(this, APP_ID);\n};\n\n// Extend AlexaSkill\nNews.prototype = Object.create(AlexaSkill.prototype);\nNews.prototype.constructor = News;\n\nNews.prototype.eventHandlers.onSessionStarted = function (sessionStartedRequest, session) {\n console.log(\"onSessionStarted requestId: \" + sessionStartedRequest.requestId + \", sessionId: \" + session.sessionId);\n // any initialization logic goes here\n};\n\nNews.prototype.eventHandlers.onLaunch = function (launchRequest, session, response) {\n //console.log(\"onLaunch requestId: \" + launchRequest.requestId + \", sessionId: \" + session.sessionId);\n handleNewsRequest(response);\n};\n\n/**\n * Overridden to show that a subclass can override this function to teardown session state.\n */\nNews.prototype.eventHandlers.onSessionEnded = function (sessionEndedRequest, session) {\n console.log(\"onSessionEnded requestId: \" + sessionEndedRequest.requestId + \", sessionId: \" + session.sessionId);\n // any cleanup logic goes here\n};\n\nNews.prototype.intentHandlers = {\n \"GetGoodNewsIntent\": function (intent, session, response) {\n handleNewsRequest(response, NEWS_TYPE.GOOD);\n },\n \"GetBadNewsIntent\": function (intent, session, response) {\n handleNewsRequest(response, NEWS_TYPE.BAD);\n },\n \"GetDayIntent\": function (intent, session, response) {\n handleGetDay(response);\n },\n\n \"AMAZON.HelpIntent\": function (intent, session, response) {\n response.ask(\"You can say tell me the good news, you can say tell me\" +\n \"the bad news, or you can say how is the day ... 
What can I\" +\n \" help you with?\", \"What can I help you with?\");\n },\n\n \"AMAZON.StopIntent\": function (intent, session, response) {\n var speechOutput = \"Goodbye\";\n response.tell(speechOutput);\n },\n\n \"AMAZON.CancelIntent\": function (intent, session, response) {\n var speechOutput = \"Goodbye\";\n response.tell(speechOutput);\n }\n};\n\n/**\n * Gets an idea of the type of day going on (good vs bad)\n */\nfunction handleGetDay(response) {\n // Create speech output\n getNewsJson(NEWS_TYPE.GOOD, function(goodNewsJson) {\n getNewsJson(NEWS_TYPE.BAD, function(badNewsJson) {\n var goodCount = goodNewsJson.length;\n var badCount = badNewsJson.length;\n var daySentiment = goodCount >= badCount ? 'good' : 'bad';\n var speechOutput = \"Today was a \" + daySentiment + \" day, with \" + goodCount + \" good news and \" + badCount + \" bad news. Do you want the good or the bad news first?\",\n cardTitle = \"Today's Good and Bad News\";\n response.askWithCard(speechOutput, cardTitle, speechOutput);\n });\n });\n}\n\n/**\n * Fetches today's news of the requested type and reads them back to the user.\n */\nfunction handleNewsRequest(response, newsType) {\n // respond with news based on type (good or bad)\n\n var speech_type = newsType == NEWS_TYPE.GOOD ? \"good\" : \"bad\";\n\n // Create speech output\n getNewsJson(newsType, function(newsJson) {\n var newsBody = buildNews(newsJson);\n var speechOutput = \"Here are today's \" + speech_type + \" news:\\n\" + newsBody;\n var cardTitle = \"Your News\";\n response.tellWithCard(speechOutput, cardTitle, speechOutput);\n },\n function (error) {\n var speechOutput = \"Something went wrong: \" + error;\n response.tell(speechOutput, speechOutput);\n });\n}\n\nfunction getNewsJson(newsType, callback, errback) {\n var url = newsType == NEWS_TYPE.GOOD ? 
GOOD_NEWS_SOURCE : BAD_NEWS_SOURCE;\n https.get(url, function(res) {\n var body = '';\n\n res.on('data', function (chunk) {\n body += chunk;\n });\n\n res.on('end', function () {\n callback(JSON.parse(body));\n });\n }).on('error', function (e) {\n console.log(\"Got error: \", e);\n errback(e);\n });\n}\n\nfunction buildNews(newsJson) {\n var newsBody = '';\n\n for (var newsIndex = 0; newsIndex < newsJson.length; newsIndex++){\n var obj = newsJson[newsIndex];\n newsBody = newsBody + obj.title + \"\\n\\n\";\n }\n return newsBody;\n}\n\n// Create the handler that responds to the Alexa Request.\nexports.handler = function (event, context) {\n // Create an instance of the SpaceGeek skill.\n var news = new News();\n news.execute(event, context);\n};\n\n" }, { "alpha_fraction": 0.6065573692321777, "alphanum_fraction": 0.6159749031066895, "avg_line_length": 29.17894744873047, "blob_id": "a58f967867b01d8c377cb82c8c9212ed55032ea7", "content_id": "1b257e18373d4e18d37f9a43d40e8d9b784531fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2867, "license_type": "no_license", "max_line_length": 71, "num_lines": 95, "path": "/populator-lambda/news.py", "repo_name": "santiycr/good-news-bad-news", "src_encoding": "UTF-8", "text": "import boto\nimport boto.s3.connection\nimport json\nimport urllib\nimport urllib2\n\n\nwith open('credentials.json') as f:\n credentials = json.loads(f.read())\n AWS_ACCESS_KEY = credentials['AWS_ACCESS_KEY']\n AWS_SECRET_KEY = credentials['AWS_SECRET_KEY']\n NEWS_API_KEY = credentials['NEWS_API_KEY']\n SENTIMENT_API_KEY = credentials['SENTIMENT_API_KEY']\n\nS3_BUCKET = 'good-news-bad-news'\nS3_BADNEWS = 'bad-news.json'\nS3_GOODNEWS = 'good-news.json'\nCATEGORIES = ['World', 'Politics', 'US']\nNEWS_BASE_URL = 'https://api.cognitive.microsoft.com/bing/v5.0/news'\nSENTIMENT_BASE_URL = \\\n 'https://api.havenondemand.com/1/api/sync/analyzesentiment/v1'\n\n\ndef clean(text):\n # Remove non-ascii characters from text.\n return ''.join([i if ord(i) < 128 else '' for i in text])\n\n\ndef fetch_articles(category):\n url = '%s?Category=%s' % (NEWS_BASE_URL, category)\n headers = {'Ocp-Apim-Subscription-Key': NEWS_API_KEY}\n\n req = urllib2.Request(url, headers=headers)\n resp = urllib2.urlopen(req)\n articles = json.loads(resp.read())['value']\n return zip([clean(a['name']) for a in articles],\n [clean(a['description']) for a in articles])\n\n\ndef get_sentiment(text):\n values = {'apikey': SENTIMENT_API_KEY,\n 'text': text}\n\n data = urllib.urlencode(values)\n req = urllib2.Request(SENTIMENT_BASE_URL, data)\n resp = urllib2.urlopen(req)\n result = json.loads(resp.read())\n return result['aggregate']\n\n\ndef upload_to_s3(name, content):\n conn = boto.connect_s3(\n aws_access_key_id=AWS_ACCESS_KEY,\n aws_secret_access_key=AWS_SECRET_KEY)\n bucket = conn.create_bucket(S3_BUCKET)\n key = bucket.new_key(name)\n key.set_contents_from_string(content)\n key.set_acl('public-read')\n\n\ndef get_news():\n results = []\n for category in CATEGORIES:\n for a in fetch_articles(category):\n title = a[0]\n abstract = a[1]\n sentiment = get_sentiment(abstract)\n if sentiment['score'] != 0.0:\n results.append({'title': title,\n 'abstract': abstract,\n 'sentiment': sentiment})\n return results\n\n\ndef handler(event, context):\n news = get_news()\n badnews = json.dumps(\n [x for x in news if x['sentiment']['sentiment'] == 'negative'])\n goodnews = json.dumps(\n [x for x in news if x['sentiment']['sentiment'] == 'positive'])\n 
upload_to_s3(S3_BADNEWS, badnews)\n upload_to_s3(S3_GOODNEWS, goodnews)\n\n\nif __name__ == '__main__':\n # handler(None, None)\n news = get_news()\n badnews = json.dumps(\n [x for x in news if x['sentiment']['sentiment'] == 'negative'])\n goodnews = json.dumps(\n [x for x in news if x['sentiment']['sentiment'] == 'positive'])\n print 'Bad news'\n print badnews\n print 'Good news'\n print goodnews\n" } ]
2
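news.py above partitions sentiment-scored articles into good and bad lists before serializing them; a standalone sketch of that partition step on made-up article records (no API keys or S3 calls involved):

import json

# Hypothetical stand-ins for the scored articles that get_news() returns.
news = [
    {'title': 'Rescue succeeds', 'sentiment': {'sentiment': 'positive', 'score': 0.8}},
    {'title': 'Storm damage', 'sentiment': {'sentiment': 'negative', 'score': -0.6}},
]

# Same split the populator performs before uploading each list.
goodnews = json.dumps([x for x in news if x['sentiment']['sentiment'] == 'positive'])
badnews = json.dumps([x for x in news if x['sentiment']['sentiment'] == 'negative'])
print(goodnews)
print(badnews)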
dohie11/csf
https://github.com/dohie11/csf
0327608ab042f9488c9ffdd606fe5350e452fe79
eaf7724b58dae13c62e49ed3450f09551d6c2828
3477f5f9209550f62508c50adc651c0b186fb036
refs/heads/master
2016-08-02T20:22:27.811331
2013-12-14T01:00:28
2013-12-14T01:00:28
13,252,433
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5819398164749146, "alphanum_fraction": 0.6220735907554626, "avg_line_length": 28.899999618530273, "blob_id": "4d26a69343d6b2a2c0123c02f064607c08ed4711", "content_id": "ad54470b33d5d75027c8b59dfa1776a900378922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 150, "num_lines": 10, "path": "/problem2.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "#Name: Hien Do & Huong Le\n#Evergreen Login: dohie11 & lethi17\n#Computer Science Foundation\n#Programming as a way of life\n#Homework1\n\n\nimport hw1_test\n\nprint ( str(hw1_test.a) + \"\\n\" + str(hw1_test.b) + \"\\n\" + str(hw1_test.c) + \"\\n\" + str(hw1_test.d) + \"\\n\" + str(hw1_test.e) + \"\\n\" + str(hw1_test.f) )\n" }, { "alpha_fraction": 0.46357616782188416, "alphanum_fraction": 0.5695364475250244, "avg_line_length": 13, "blob_id": "c52a73878f2af6744a9adc8c56c4cb7133af7475", "content_id": "3e8b661d955c8c66cc92a2377c4cd6dc6aa764ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 151, "license_type": "no_license", "max_line_length": 25, "num_lines": 10, "path": "/quiz2.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "\nimport lab2\n\n\nif lab2.n > 50:\n print \"greater than 50\"\n \nelif lab2.n == 50 :\n print \"equal 50\"\nelif lab2.n < 50 :\n print \"less than 50\"\n \n \n \n\n" }, { "alpha_fraction": 0.5949522852897644, "alphanum_fraction": 0.6044936776161194, "avg_line_length": 24.76984214782715, "blob_id": "495c6156ed5453c4ca8162dd37cace34286e01f3", "content_id": "8d844d97b55ba653f23dc0525f616a02ce00169f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3249, "license_type": "no_license", "max_line_length": 86, "num_lines": 126, "path": "/dna_analysisHW4.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "# Name: Hien Do\n# Evergreen Login: dohie11\n# Computer Science Foundations\n# Programming as a Way of Life\n# Homework 4: DNA analysis \n\n# This program reads DNA sequencer output and computes statistics, such as\n# the GC content. 
Run it from the command line like this:\n# python dna_analysis.py myfile.fastq\n\n\n###########################################################################\n### Libraries\n###\n\n# The sys module supports reading files, command-line arguments, etc.\nimport sys\n\n\n###########################################################################\n### Read the nucleotides into a variable named seq\n###\n\n# You need to specify a file name\nif len(sys.argv) < 2:\n print \"You must supply a file name as an argument when running this program.\"\n sys.exit(2)\n# The file name specified on the command line, as a string.\nfilename = sys.argv[1]\n# A file object from which data can be read.\ninputfile = open(filename)\n\n# All the nucleotides in the input file that have been read so far.\nseq = \"\"\n# The current line number (= the number of lines read so far).\nlinenum = 0\n\n\nfor line in inputfile:\n linenum = linenum + 1\n # if we are on the 2nd, 6th, 10th line...\n if linenum % 4 == 2:\n # Remove the newline characters from the end of the line\n line = line.rstrip()\n seq = seq + line\n\n \n\n\n###########################################################################\n### Compute statistics\n###\n\n# Total nucleotides seen so far.\ntotal_count = 0\ng_count = 0\nc_count = 0\na_count = 0\nt_count = 0\ninvalidcount = 0\n\n\n### Got bored want to change this to function and see if it works\n### but cant get it to work then Alex came over and said we are not suppose\n### to change it to a function.\n#def bps(n)\n# if bp == 'n'\n# n = n + 1\n# return n\n\n\n#g_count = bps(g)\n#c_count = bps(c)\n#a_count = bps(a)\n#t_count = bps(t)\n\n# for each base pair in the string,\nfor bp in seq:\n # increment the total number of bps we've seen\n total_count = total_count + 1\n # next, if the bp is a G or a C,\n if bp == 'G':\n g_count += 1\n elif bp == 'C':\n c_count += 1\n elif bp == 'A':\n a_count += 1\n elif bp == 'T':\n t_count += 1\n else:\n invalidcount += 1\n \n \n \n\n\n# reduced a few lines by moving gc_content. G+C and A+T is the same as GC and AT alone\ngc_content = float(g_count+c_count) / total_count\nat_content = float(a_count+t_count) / total_count\n\n# GC/AT Ratio using the counts \n## Float give decimals. Without it it doesnt. 
I forgot about this.\n## too many hours\ngcat_ratio = float(g_count+c_count) / (a_count+t_count)\n\n# Calculate if high, low, or moderate using else if statements.\nif gc_content >= 0.6:\n\tgclowhigh = 'High GC content'\nelif gc_content < 0.4:\n\tgclowhigh = 'Low GC content'\nelse:\n\tgclowhigh = 'Moderate GC content'\n\n# Print the answer\nprint 'GC-content:', gc_content\nprint 'AT-content:', at_content\nprint 'G: ', g_count\nprint 'C: ', c_count\nprint 'A: ', a_count\nprint 'T: ', t_count\nprint 'Sum:', (a_count + c_count + g_count + t_count)\nprint 'Invalid Count:', invalidcount\nprint 'Total Count (sum & invalids):', total_count\nprint 'AT-GC Ratio:', gcat_ratio\nprint 'Seq Length', len(seq)\nprint 'GC Classification: ', gclowhigh\n\n\n" }, { "alpha_fraction": 0.6393362879753113, "alphanum_fraction": 0.6700829863548279, "avg_line_length": 21.34375, "blob_id": "705197c4f742f4866aaccd05623d3f5cdb09c46e", "content_id": "36c22da7b358ad8e8d1c99502004ba740cf55ed8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2049, "license_type": "no_license", "max_line_length": 100, "num_lines": 96, "path": "/hw6.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "# Name: Group 16: Hien Do, Travis Goodroad, Amber Hare\n# Evergreen Login: dohie11 , gootra28, hartra06\n# Computer Science Foundations\n# Programming as a Way of Life\n# Homework 6: create function : #Group pair Group 23 \n\t\t\t\t\t\t\t\t#Group Member 1 Hannah Spencer \n\t\t\t\t\t\t\t\t#Group Member 2 Russ Arnold \n\n# You may do your work by editing this file, or by typing code at the\n# command line and copying it into the appropriate part of this file when\n# you are done. When you are done, running this file should compute and\n# print the answers to all the problems.\n\n\n###\n### Problem 3\n### Recognizing Types\n\n# Observed values (kept as comments so the file still runs):\n# Good morning!\n# ['pink', 'yellow', 'green', 'red', 'purple', 'blue', 'orange']\n# {'Karen': 22, 'Bob': 20, 'Lucy': 21, 'Adam': 21}\n# [{'January': 1, 'December': 12}, {'January 1st': 'New Years', 'December 25th': 'Christmas'}]\n\n\n# The last value is just a list of dictionaries\n\n###\n### Problem 4\n### Identifying Values With an Index\n\n# antelope penguin bear\n\n# for this problem the index values start at zoo[index value].\n# for zoo2 it is not possible because zoo2 has only 1 item in the list, so the only valid index is 0\n\n\n\n\n###\n### Problem 5: An Index Within an Index \n###\n#code:\ncandy = {\"sour patch kids\":22, \"sour gummy worms\": 10, \"m&ms\": 13, \"snickers\": 1, \"laffy taffy\": 5}\n\nvalue_list = candy.values()\n\nfor i in range(0, len(value_list)):\n print value_list[i]\n\n\n#answer:\n# 10\n# 5\n# 13\n# 1\n# 22\n\n###\n### Problem 6: Using a For Loop With a Dictionary\n### code fixed\ndrinks = {\"redbull\": 3, \"water\": 4, \"fresca\": 1, \"pom tea\": 1, \"rockstar\": 2}\n\nfor i in drinks:\n print i\n\n# Answers: For this problem 6, we just use the \"for loop\" to pull the keys out:\n# water\n# fresca\n# rockstar\n# redbull\n# pom tea\n\n\n\n###\n### Problem 7: Understanding and Using Assert\n###\n\n# This problem uses assert to verify that two values match \n# For example:\n\n# assert 1 == 0\n# this assertion fails, so the program would stop right here.\n\n# assert x == x\n\n# this one passes (for any defined x), so the program would continue to execute the rest of the code.\n\n\n\n###\n### Collaboration: Travis Goodroad, Amber Hare. 
( Group 16)\n###\n" }, { "alpha_fraction": 0.5721932649612427, "alphanum_fraction": 0.5855399370193481, "avg_line_length": 32.0448112487793, "blob_id": "d7ac3c06b7b27381da45a913865c80ddc0962386", "content_id": "cc254b7f54b0716d1e59b732599566c436d098b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14011, "license_type": "no_license", "max_line_length": 86, "num_lines": 424, "path": "/electionHW7.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "# Name: ...\n# Evergreen Login: ...\n# Programming as a Way of Life\n# Homework 5: Election prediction\n\nimport csv\nimport os\nimport time\n\ndef read_csv(path):\n \"\"\"\n Reads the CSV file at path, and returns a list of rows. Each row is a\n dictionary that maps a column name to a value in that column, as a string.\n \"\"\"\n output = []\n for row in csv.DictReader(open(path)):\n output.append(row)\n return output\n\n\n################################################################################\n# Problem 1: State edges\n################################################################################\n\ndef row_to_edge(row):\n \"\"\"\n Given an election result row or poll data row, returns the Democratic edge\n in that state.\n \"\"\"\n return float(row[\"Dem\"]) - float(row[\"Rep\"]) \n\ndef state_edges(election_result_rows):\n \"\"\"\n Given a list of election result rows, returns state edges.\n The input list has no duplicate states;\n that is, each state is represented at most once in the input list.\n \"\"\"\n dictionary={}\n for i in range(len(election_result_rows)):\n rowDictionary = election_result_rows[i]\n state = rowDictionary['State']\n edge = row_to_edge(rowDictionary)\n tempDictionary = {state : edge}\n dictionary = dict(tempDictionary.items()+dictionary.items())\n return dictionary\n\n################################################################################\n# Problem 2: Find the most recent poll row\n################################################################################\n\ndef earlier_date(date1, date2):\n \"\"\"\n Given two dates as strings (formatted like \"Oct 06 2012\"), returns True if \n date1 is before date2.\n \"\"\"\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))\n\ndef most_recent_poll_row(poll_rows, pollster, state):\n \"\"\"\n Given a list of poll data rows, returns the most recent row with the\n specified pollster and state. If no such row exists, returns None.\n \"\"\"\n # keep only the rows for this pollster and state, then scan for the latest\n candidates = []\n for row in poll_rows:\n if row['Pollster'] == pollster and row['State'] == state:\n candidates.append(row)\n if len(candidates) == 0:\n return None\n poll = candidates[0]\n for row in candidates[1:]:\n if earlier_date(poll['Date'], row['Date']):\n poll = row\n return poll\n\n\n\n################################################################################\n# Problem 3: Pollster predictions\n################################################################################\n\ndef unique_column_values(rows, column_name):\n \"\"\"\n Given a list of rows and the name of a column (a string), returns a set\n containing all values in that column.\n \"\"\"\n # collects every value that appears in the column into a set\n values = set()\n for i in range(len(rows)):\n rowDictionary = rows[i]\n values.add(rowDictionary[column_name])\n return values\n\ndef pollster_predictions(poll_rows):\n \"\"\"\n Given a list of poll data rows, returns pollster predictions.\n \"\"\"\n\n # takes a list of dictionaries and creates a list of dictionaries\n # with only the most recent polls\n \n recentSet=[]\n for i in range(len(poll_rows)):\n rowDictionary=poll_rows[i]\n state=rowDictionary[\"State\"]\n pollster=rowDictionary[\"Pollster\"]\n recentDict=most_recent_poll_row(poll_rows, pollster, state)\n recentSet=recentSet + [recentDict]\n \n \n # takes recent set and creates a list of the unique Pollsters\n # in the set\n pollsterDict={}\n pollsterList=list(unique_column_values(recentSet, \"Pollster\"))\n \n # for every unique pollster it checks to make sure that the dictionary\n # in the list is of that pollster. 
If it is then the loop takes the\n # edge of that dictionary and compiles a dictionary of state edges.\n # then those state edges are put in a dictionary {Pollster: stateEdges}\n \n for i in range(len(pollsterList)):\n pollster=pollsterList[i]\n stateEdge={}\n for j in range(len(recentSet)):\n rowDictionary=recentSet[j]\n dictPollster=rowDictionary[\"Pollster\"]\n if dictPollster == pollster:\n rowDictionary=[rowDictionary]\n stateEdge=dict(state_edges(rowDictionary).items()+stateEdge.items())\n pollsterDict[pollster]=stateEdge \n return pollsterDict\n\n \n################################################################################\n# Problem 4: Pollster errors\n################################################################################\n\ndef average_error(state_edges_predicted, state_edges_actual):\n \"\"\"\n Given predicted state edges and actual state edges, returns\n the average error of the prediction.\n \"\"\"\n count=0\n error=0\n averageError=0\n \n # takes the dictionary of predicted and the dictionary of edges and\n # checks if they are the same state. If they are the count increments\n # and the error is calculated. After all the states are checked the average\n # error is computed.\n \n for i in state_edges_predicted:\n for j in state_edges_actual:\n if i==j:\n count= count+1\n tempError=abs(state_edges_predicted[i]-state_edges_actual[j])\n error= error+tempError\n if count>0:\n averageError=error/count\n return averageError \n \ndef pollster_errors(pollster_predictions, state_edges_actual):\n \"\"\"\n Given pollster predictions and actual state edges, returns pollster errors.\n \"\"\"\n \n # takes two dictionaries and returns the average error by pollster in a dictionary\n errorDict={}\n for i in pollster_predictions:\n errorDict[i]=average_error(pollster_predictions[i], state_edges_actual)\n return errorDict\n\n\n################################################################################\n# Problem 5: Pivot a nested dictionary\n################################################################################\n\ndef pivot_nested_dict(nested_dict):\n \"\"\"\n Pivots a nested dictionary, producing a different nested dictionary\n containing the same values.\n The input is a dictionary d1 that maps from keys k1 to dictionaries d2,\n where d2 maps from keys k2 to values v.\n The output is a dictionary d3 that maps from keys k2 to dictionaries d4,\n where d4 maps from keys k1 to values v.\n For example:\n input = { \"a\" : { \"x\": 1, \"y\": 2 },\n \"b\" : { \"x\": 3, \"z\": 4 } }\n output = {'y': {'a': 2},\n 'x': {'a': 1, 'b': 3},\n 'z': {'b': 4} }\n \"\"\"\n stateDict={}\n for i in nested_dict:\n dict2=nested_dict[i]\n for j in dict2:\n if j not in stateDict:\n stateDict[j] = {i: dict2[j]}\n else:\n stateDict[j][i] = dict2[j]\n return stateDict\n\n\n################################################################################\n# Problem 6: Average the edges in a single state\n################################################################################\n\ndef average_error_to_weight(error):\n \"\"\"\n Given the average error of a pollster, returns that pollster's weight.\n The error must be a positive number.\n \"\"\"\n return error ** (-2)\n\n# The default average error of a pollster who did no polling in the\n# previous election.\nDEFAULT_AVERAGE_ERROR = 5.0\n\ndef pollster_to_weight(pollster, pollster_errors):\n \"\"\"\n Given a pollster and a pollster errors, return the given pollster's weight.\n \"\"\"\n if pollster not in pollster_errors:\n weight = average_error_to_weight(DEFAULT_AVERAGE_ERROR)\n else:\n weight = average_error_to_weight(pollster_errors[pollster])\n return weight\n\n\ndef weighted_average(items, weights):\n \"\"\"\n Returns the weighted average of a list of items.\n \n Arguments:\n items is a list of numbers.\n weights is a list of numbers, whose sum is nonzero.\n \n Each weight in weights corresponds to the item in items at the same index.\n items and weights must be the same length.\n \"\"\"\n assert len(items) > 0\n assert len(items) == len(weights)\n \n top=0.0\n botSum=0.0\n for i in range(len(items)):\n topProduct=float(items[i]*weights[i])\n botSum=float(botSum + weights[i])\n top= top + topProduct\n \n weightedAvg= top/botSum\n return weightedAvg \n\ndef average_edge(pollster_edges, pollster_errors):\n \"\"\"\n Given pollster edges and pollster errors, returns the average of these edges\n weighted by their respective pollster errors.\n \"\"\"\n weights=[]\n items=[]\n for i in pollster_edges:\n items.append(pollster_edges[i])\n weights.append(pollster_to_weight(i, pollster_errors))\n \n avgEdge=weighted_average(items, weights)\n return avgEdge\n\n \n################################################################################\n# Problem 7: Predict the 2012 election\n################################################################################\n\ndef predict_state_edges(pollster_predictions, pollster_errors):\n \"\"\"\n Given pollster predictions from a current election and pollster errors from\n a past election, returns the predicted state edges of the current election.\n \"\"\"\n # pivot {pollster: {state: edge}} into {state: {pollster: edge}}, then\n # combine each state's edges with the error-weighted average\n state_to_edges = pivot_nested_dict(pollster_predictions)\n predictions = {}\n for state in state_to_edges:\n predictions[state] = average_edge(state_to_edges[state], pollster_errors)\n return predictions\n \n\n################################################################################\n# Electoral College, Main Function, etc.\n################################################################################\n\ndef electoral_college_outcome(ec_rows, state_edges):\n \"\"\"\n Given electoral college rows and state edges, returns the outcome of\n the Electoral College, as a map from \"Dem\" or \"Rep\" to a number of\n electoral votes won. 
When you are done, running this file should compute and\n# print the answers to all the problems.\n\n\n###\n### Problem 1\n###\n\n# DO NOT CHANGE THE FOLLOWING LINE\nprint \"Problem 1 solution follows:\"\n\nn = 100\n\naccumulator = 0\n\ni = 1\n\nwhile i <= n :\n\t\n\taccumulator = accumulator + i\n\t\n\ti = i + 1\n\t\nprint accumulator\n\n\n\n###\n### Problem 2\n###\n\n# DO NOT CHANGE THE FOLLOWING LINE\nprint \"Problem 2 solution follows:\"\n\n\nl = range( 2, 11 )\n\nfor i in l:\n\t\n\tprint 1.0/i\n\t\n\t\n\n###\n### Problem 3\n###\n\n# DO NOT CHANGE THE FOLLOWING LINE\nprint \"Problem 3 solution follows:\"\n\nn = 10\n\naccumulator = 0\n\nfor i in range ( 1, n + 1 ):\n\n\taccumulator = accumulator + i \n\nprint accumulator\n\nprint (n * ( n + 1 )) / 2 \n\n\n###\n### Problem 4\n###\n\n# DO NOT CHANGE THE FOLLOWING LINE\nprint \"Problem 4 solution follows:\"\nimport math\n\nmath.factorial(10)\n\nn = 10\n\naccumulator = 1\n\nfor i in range ( 1 , n + 1 ) :\n\n\taccumulator = accumulator * i\n\t\nprint accumulator\n\t\n###\n### Problem 5\n###\n\n# DO NOT CHANGE THE FOLLOWING LINE\nprint \"Problem 5 solution follows:\"\n\nimport math\n\nmath.factorial(10)\n\nn = 10\n\nwhile n > 0:\n\n\taccumulator = 1\n\t\n\tfor i in range ( 1 , n + 1 ) :\n\n\t\taccumulator = accumulator * i\n\t\n\tprint accumulator\n\tn = n - 1\n\t\n###\n### Problem 6\n###\n\n# DO NOT CHANGE THE FOLLOWING LINE\nprint \"Problem 6 solution follows:\"\n\nimport math\n\nmath.factorial(10)\n\nn = 1\n\naccumulator2 = 1.0\n\nwhile n <= 10:\n\n\taccumulator = 1\n\t\n\tfor i in range ( 1 , n + 1 ) :\n\n\t\taccumulator = accumulator * i\n\t\n\taccumulator2 = accumulator2 + ( 1.0 / accumulator)\n\tn = n + 1\n\n\t\nprint accumulator2\n\n###\n### Collaboration\n###\n\n# ... List your collaborators and other sources of help here (websites, books, etc.),\n# ... as a comment (on a line starting with \"#\").\n\n###\n### Reflection\n###\n\n# its about 16 hours for me to complete the homework. The lecture is very helpful for me , however, I can not catch up all the information in class. \n# I used math skills to find out good way to finish the problem first, then come back with python right after that.\n# There are some code I could not understand, the tutor have to spend 30-45 minutes to explain it for me.\n# They explain what the different between \"for\" and \" while\" for me, which I could not understand clearly in class." 
}, { "alpha_fraction": 0.6464285850524902, "alphanum_fraction": 0.6892856955528259, "avg_line_length": 24.454545974731445, "blob_id": "cc1cf65f0e70ac4b043d2b8d453bf03bfe77035b", "content_id": "9375959efac456497461ad86a21b0e7f4c2a0bff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 280, "license_type": "no_license", "max_line_length": 114, "num_lines": 11, "path": "/problem3.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "#Name: Hien Do & Huong Le\n#Evergreen Login: dohie11 & lethi17\n#Computer Science Foundation\n#Programming as a way of life\n#Homework1\n\n\nimport hw1_test\n\nresult = (( hw1_test.a and hw1_test.b ) or ( not hw1_test.c ) and not ( hw1_test.d or hw1_test.e or hw1_test.f ))\nprint ( result )\n" }, { "alpha_fraction": 0.7577319741249084, "alphanum_fraction": 0.7635309100151062, "avg_line_length": 44.661766052246094, "blob_id": "56df723d35d15e24584e88cd0cb509b68a8f9909", "content_id": "36d00c0af1a117490207e5312e734b24653efb76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3104, "license_type": "no_license", "max_line_length": 158, "num_lines": 68, "path": "/hw6-ours.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "# Name: Group 16: Hien Do, Travis Goodroad, Amber Hare\n# Evergreen Login: dohie11 , gootra28, hartra06\n# Computer Science Foundations\n# Programming as a Way of Life\n# Homework 6\n\n# You may do your work by editing this file, or by typing code at the\n# command line and copying it into the appropriate part of this file when\n# you are done. When you are done, running this file should compute and\n# print the answers to all the problems.\n\n\"\"\"\nProblem 3\nFirst let's take a look at how nested data structures function so we can understand how best to utilize them\nWhat kind of data structures do we already know about?\nCreate one of each and assign them all do a different value.\nHow can we combine these different kinds of data structures to create nested data structures?\nCreate a list of tuples, a list of dictionaries and one other nested data structure of your choice.\n\"\"\"\n\n#put your data structures here\n\n\"\"\"\nProblem 4\nLet's take a closer look at how dictionaries work.\nA dictionary is a data structure that maps keys to values, much in the same way that physical dictionaries match words to definitions.\nCreate an dictionary called Hamburger with key & value pairs Condiment:Ketchup, Meat:Beef, Bun:Sesame.\n\"\"\"\n\n#put the dictionary here\n\n\"\"\"\nProblem 5\nNow that we have our basic hamburger dictionary, we can begin to add more things to it.\nAdding to dictionaries does not use the add() or append() functions used in lists.\nTo add a key & value pair to a dictionary called \"okay\" we have to use the function okay['key'] = value\nAdd 3 of your favorite toppings to the hamburger dictionary, assigning them all to different keys (i.e. 
Lettuce:Romaine, Cheese:Cheddar)\n\n\"\"\"\n\n#put your code here\n\n\"\"\"\nProblem 6\nOur hamburger is now almost done but you suddenly realized that you would like to change a few of the items on the burger.\nWe first have to know how to index and locate items in the dictionary.\nFortunately, python makes this very easy for us.\nAll we have to do is enter the name of the dictionary and then index it with the key.\nFor example, say you want to find the value of the key \"Cereal\" in the dictionary \"Breakfasts\".\nAll we have to do is enter Breakfast[Cereal] (which in this case would return \"Not a real breakfast\").\nWe can use this to change the value assigned to a key in the same way we can reassign values to variables.\nIf we want to change the value of the \"Cereal\" key from \"Not a real breakfast\" to \"Delicious\" we can enter Breakfast[Cereal] = \"Delicious\"\nChange the \"Bun\" on your \"Hamburger\" to \"Toasted\" and the \"Condiment\" to \"Ketchup and Mustard\".\n\n\"\"\"\n\n#put your code here\n\n\"\"\"\nProblem 7\nLet's now think about how this applies to the problem in hw5.\nWe are given a large list of states to assign edge values to.\nThe formula for finding the values of the edge is already given to us so we just need to figure out a way to assign a bunch of values to keys in a given list.\nFor loops can be very helpful but figuring out what to iterate them over can be tricky.\nWhat would a for loop used for problem 1 in hw5 iterate over?\n\"\"\"\n\n#put your for loop here, add a pass keyword inside the loop" }, { "alpha_fraction": 0.71875, "alphanum_fraction": 0.71875, "avg_line_length": 7, "blob_id": "f10cea985c7d78612a6e406778f7a96ed98b9f91", "content_id": "faf1aa8b79c88067871d97059fed9aba4af47b8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 32, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/README.md", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "csf\n===\n\nEvergreen CSF Homework\n" }, { "alpha_fraction": 0.38983049988746643, "alphanum_fraction": 0.4203389883041382, "avg_line_length": 12.952381134033203, "blob_id": "3210af3696fbe66d6b60cc7f198f1a038a80be2a", "content_id": "f1b71a14948ac07aacf25d432ee2487437f6ddbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 27, "num_lines": 21, "path": "/lap3.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "\n\nn = 19\n\nseries = \"Fibonacci\"\n\nif series == \"Fibonacci\":\n a = 0\n b = 1\n for i in range( n - 1 ):\n\t\ta = b\n\t\tb = a+b\n\t\tprint a\nelif series == \"sum\":\n i = 0\n l = 0\n while i < ( 3 * n ):\n x = 3\n i =+ (i+x)\n l = i + l\n print l\nelse:\n print \"Invalid String\"\n" }, { "alpha_fraction": 0.4811529815196991, "alphanum_fraction": 0.5365853905677795, "avg_line_length": 15.666666984558105, "blob_id": "ffac008036c250a6e2f855175a5e5c114692ef0a", "content_id": "268040966d5354fcf4e921f5c8f47a030393bb0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 43, "num_lines": 27, "path": "/problem1.py", "repo_name": "dohie11/csf", "src_encoding": "UTF-8", "text": "#Name: Hien Do & Huong Le\n#Evergreen Login: dohie11 & lethi17\n#Computer Science Foundation\n#Programming as a way of life\n#Homework1\n\n\n\nimport math\n\n\ndef solver( a, b, c):\n\tdelta = (b*b) - 
(4*a*c) # discriminant: b*b - 4*a*c\n\n\tif delta < 0 :\n\t\t# no real roots\n\t\tprint (\"None\")\n\n\telif delta == 0 :\n\t\t# one repeated real root\n\t\tx = -b / (2 * a)\n\t\tprint(x)\n\n\telif delta > 0 :\n\t\t# two distinct real roots\n\t\tx1 = ( -b + math.sqrt( delta )) / (2 * a)\n\t\tx2 = ( -b - math.sqrt( delta )) / (2 * a)\n\t\tprint ( str(x1) + \" \" + str(x2) )\n\nsolver(1, -5.86, 8.5408)\n\n" } ]
11
natmat/LogDater
https://github.com/natmat/LogDater
e88845b9043cbdeb1db2a51df09dc037c587a6cf
36be986973285cac19e371144613a8d6ae435789
9a50cc989ec796b0e110fd2ef235027d5e013260
refs/heads/master
2022-06-21T16:02:24.251910
2020-05-05T17:55:11
2020-05-05T17:55:11
261,279,928
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6313029527664185, "alphanum_fraction": 0.6421944499015808, "avg_line_length": 30.392404556274414, "blob_id": "92a9e381d676972c19d050dacaabb3a856c05719", "content_id": "230bfeb95b5538d64326b095a2641a88092901d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2479, "license_type": "no_license", "max_line_length": 83, "num_lines": 79, "path": "/LogDater.py", "repo_name": "natmat/LogDater", "src_encoding": "UTF-8", "text": "from sys import exit\nimport tkinter as tk\nfrom tkinter import filedialog, messagebox\n\n'''\nFunction to open file chooser and prompt user to enter log file\nreturns the opened log file, or exits\n'''\ndef openFileChooser():\n root = tk.Tk()\n root.withdraw()\n file = 'Logs/asdo-10.177.176.21.log'\n file = 'Logs/tmp.log'\n file = filedialog.askopenfilename(#\n initialdir = \"C:\\Logs\",\n filetypes = ((\"log files\",\"*.log\"),(\"all files\",\"*.*\")))\n if not file:\n tk.messagebox.showerror(\"Error\", \"Error: must select a log file\")\n exit(1)\n return file\n\n'''\nUsage: LogDater [logfile]...\nUse either the cmd ine arg file, or open file chooser\n'''\nimport sys\nif len(sys.argv) == 2:\n log_file = sys.argv[1]\nelse:\n log_file = openFileChooser()\n\nprint(\"Reading data from {}\".format(log_file))\n\nimport re\nimport time\nfrom datetime import datetime\nfrom datetime import timedelta\n\n# If inter-line delay > max_interval, report the diff\nmax_interval = 5\n\n# Open (in readonly) log_file file for parsing\nlog_data = open(log_file, 'r')\noutput_log_data = open(log_file +'.csv', 'w')\nline_number = 1\nlog_file_date_time_format = '%Y-%m-%dT%H:%M:%S'\n\n[time_now_object, rest_of_line] = re.split('[\\+]', log_data.readline().strip(), 1)\nprev_datetime = datetime.strptime(time_now_object, log_file_date_time_format)\nstart_time = prev_datetime\n# time_now = datetime(2020, 1, 1)\n\nlog_data.seek(0)\n\nfor line in log_data:\n # print('[{}] line: {}'.format(count, line))\n [the_datetime, this_line] = re.split('[\\+]', line.strip(), 1)\n current_datetime = datetime.strptime(the_datetime, log_file_date_time_format)\n # print('datetime_object={}'.format(datetime_object))\n\n diff = (current_datetime - prev_datetime).total_seconds()\n if diff > max_interval:\n print(' {} {}'.format(current_datetime, this_line))\n print('{:06d}: {} {}'.format(line_number, prev_datetime, previous_line))\n # print('diff = {}s'.format(time.strftime('%H:%M:%S', time.gmtime(diff))))\n\n # print the line number and diff (s) in CSV\n run_time = (current_datetime - start_time).total_seconds()\n print('{},{},{}'.format(line_number, run_time,diff))\n output_log_data.writelines('{},{},{}\\n'.format(line_number, run_time,diff))\n # input(\"Press Enter to continue...\")\n\n prev_datetime = current_datetime\n previous_line = this_line\n line_number += 1\n\n # input(\"Press Enter to continue...\")\n\noutput_log_data.close()" } ]
1
haarsh01/telegram-bot
https://github.com/haarsh01/telegram-bot
2265a3b58242ab412322f7d14313c3c0795fda66
f350ce1a7c707b639af383dd2a0b0e30493b8bc7
95f489724056690cb377702e1f7976873d597483
refs/heads/main
2023-03-08T06:16:59.582708
2021-02-20T10:45:24
2021-02-20T10:45:24
340,629,509
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6996830701828003, "alphanum_fraction": 0.6996830701828003, "avg_line_length": 26.68181800842285, "blob_id": "a5c337db7752ae191368f35d26b7d1c93983c138", "content_id": "859dc885d0a43d151c5c500eb33ca1b0ca7187d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1262, "license_type": "no_license", "max_line_length": 102, "num_lines": 44, "path": "/bot.py", "repo_name": "haarsh01/telegram-bot", "src_encoding": "UTF-8", "text": "import logging\r\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\r\n\r\nlogging.basicConfig(format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\", level=logging.INFO)\r\nlogger = logging.getLogger(__name__)\r\n\r\nTOKEN = \"1416864887:AAG7eJIcQXlfmKqCbfwhmI0aQhDBkgUOGn0\"\r\n\r\n\r\ndef start(bot, update):\r\n print(update)\r\n author = update.message.from_user.first_name\r\n reply = f\"Hi! {author}\"\r\n bot.send_message(chat_id=update.message.chat_id, text=reply)\r\n\r\n\r\ndef _help(bot, update):\r\n reply = \"this is a help message\"\r\n bot.send_message(chat_id=update.message.chat_id, text=reply)\r\n\r\n\r\ndef echo_text(bot, update):\r\n print(update)\r\n bot.send_message(chat_id=update.message.chat_id, text=update.message.text)\r\n\r\n\r\ndef echo_sticker(bot, update):\r\n bot.send_message(chat_id=update.message.chat_id, text=update.message.sticker.file_id)\r\n\r\n\r\ndef error(bot, update):\r\n logger.error(\"error\")\r\n\r\n\r\nupdater = Updater(TOKEN, use_context=False)\r\ndp = updater.dispatcher\r\ndp.add_handler(CommandHandler(\"start\", start))\r\ndp.add_handler(CommandHandler(\"help\", _help))\r\ndp.add_handler(MessageHandler(Filters.text, echo_text))\r\ndp.add_handler(MessageHandler(Filters.sticker, echo_sticker))\r\n\r\nupdater.start_polling()\r\nlogger.info(\"started polling...\")\r\nupdater.idle()\r\n" } ]
1
yvnicolas/odoo-enhanced_leads
https://github.com/yvnicolas/odoo-enhanced_leads
9a1075940c86d5e52e830da2ffe6461231d3332f
70fb35564ca012a58b96f36bb63c75fc05716c3e
d0175fe4b53499cfe5cb4d5b65ba21f4dc63edf4
refs/heads/master
2021-01-10T01:40:09.265503
2016-02-16T18:39:22
2016-02-16T18:39:22
51,858,930
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8187500238418579, "alphanum_fraction": 0.8187500238418579, "avg_line_length": 52.33333206176758, "blob_id": "b39f04d5ab223506d634c4724758927f1ecb29b2", "content_id": "a273052e12144e313d91398cdfc6c436a0949c43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 160, "license_type": "no_license", "max_line_length": 79, "num_lines": 3, "path": "/README.md", "repo_name": "yvnicolas/odoo-enhanced_leads", "src_encoding": "UTF-8", "text": "A very simple odoo module to add some features to odoo standard lead management\n\nThis is also use as an ongoing personnal practice for odoo module development.\n" }, { "alpha_fraction": 0.7157360315322876, "alphanum_fraction": 0.7174280881881714, "avg_line_length": 38.266666412353516, "blob_id": "e3c86b031aa071c6f6659202c71b8607088e7783", "content_id": "4c473f8c741efface324f5a1589a919a33d4700a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 79, "num_lines": 15, "path": "/models.py", "repo_name": "yvnicolas/odoo-enhanced_leads", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\nfrom openerp.addons.crm import crm_lead\n\nclass enhanced_Leads(models.Model):\n\n\t# _inherit and _name being the same ensures we keep the same object everywhere\n _inherit = 'crm.lead'\n _name = 'crm.lead'\n name = fields.Char()\n last_Contact=fields.Char(string=\"Last Contact Summary\", required=False)\n last_Contact_Date=fields.Date(string=\"Last Contact Date\", required=False)\n next_Action=fields.Char(string=\"Next Step\", required=False)\n next_Action_Date=fields.Date(string=\"Next Step Date\", required=False)\n\n\n" } ]
2