max_stars_repo_path stringlengths 3-269 | max_stars_repo_name stringlengths 4-119 | max_stars_count int64 0-191k | id stringlengths 1-7 | content stringlengths 6-1.05M | score float64 0.23-5.13 | int_score int64 0-5 |
---|---|---|---|---|---|---|
backend/logger/migrations/0029_rename_httprequest_payload.py | AstroMatt/esa-subjective-time-perception | 1 | 12791351 | <gh_stars>1-10
# Generated by Django 4.0.4 on 2022-05-07 09:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0028_remove_httprequest_api_version_and_more'),
]
operations = [
migrations.RenameModel(
old_name='HTTPRequest',
new_name='Payload',
),
]
| 1.515625 | 2 |
nn/mnist_tf_coreml/pb_to_coreml.py | kamino410/edsdk-sample | 23 | 12791352 | <filename>nn/mnist_tf_coreml/pb_to_coreml.py
import tensorflow as tf
import tfcoreml
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('input_pb')
parser.add_argument('output_mlmodel')
args = parser.parse_args()
model = tfcoreml.convert(tf_model_path=args.input_pb,
mlmodel_path=args.output_mlmodel,
output_feature_names=['dense_1/Softmax:0'],
input_name_shape_dict={
'flatten_input:0': [1, 28, 28, 1]},
image_input_names=['flatten_input:0'])
spec = model.get_spec()
print(spec.description.output)
| 2.765625 | 3 |
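A quick way to sanity-check the conversion above is to load the .mlmodel back with coremltools and run a prediction. This is a hedged sketch: the model path and the sanitized input key are assumptions (tfcoreml typically rewrites 'flatten_input:0' to 'flatten_input__0'), so inspect spec.description.input on the real model first.

# Hedged sketch: round-trip check of the converted model with coremltools.
import coremltools
from PIL import Image

mlmodel = coremltools.models.MLModel('mnist.mlmodel')          # hypothetical output path
digit = Image.open('digit.png').convert('L').resize((28, 28))  # hypothetical test image
print(mlmodel.predict({'flatten_input__0': digit}))            # input key is an assumption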
build_framework/build_clib.py | kdschlosser/wxAnimation | 2 | 12791353 | <filename>build_framework/build_clib.py
# -*- coding: utf-8 -*-
import distutils
import distutils.errors
import distutils.core
import distutils.command.build_clib
import distutils.log
from distutils.sysconfig import customize_compiler
import distutils.dir_util
import os
from . import spawn_process
from .library.library_base import Library
class build_clib(distutils.core.Command):
user_options = [
('build-clib=', 'b',
"directory to build C/C++ libraries to"),
('build-temp=', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", distutils.command.build_clib.show_compilers),
]
def spawn(self, *args, **kwargs):
spawn_process.spawn(*args, **kwargs)
# we override the compiler's mkpath so we can inject the verbose option.
# the compiler's version does not allow for setting a verbose level,
# and distutils.dir_util.mkpath defaults to a verbose level of 1,
# which prints out each and every directory it makes. This congests the
# output unnecessarily.
def mkpath(self, name, mode=0o777):
distutils.dir_util.mkpath(
name,
mode,
dry_run=self.compiler.dry_run,
verbose=0
)
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options(
'build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force')
)
if not os.path.exists(self.build_clib):
os.makedirs(self.build_clib)
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
self.libraries = self.distribution.libraries
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
def run(self):
if not self.libraries:
return
# we are leaving this here so that, if wanted, the built-in distutils compiler can be used.
# Instead of using a tuple and a dict to provide compiler options I decided to make a class
# called Library. This class is what will hold all of the various build components needed
# for a build. There is a method "build" that gets called; if this method is overridden,
# it is what gets used instead of the internal compiler. I created a wrapper class around the
# Library which institutes a multi-threaded compiling process, so we no longer use the built-in
# compiler with distutils. I am not able to use the distutils compiler in a threaded scenario
# because it was not designed to be thread safe and things get all kinds of funky.
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(
compiler=self.compiler,
dry_run=self.dry_run,
force=self.force
)
# replace the compiler's spawn and mkpath with the ones that we have written
self.compiler.spawn = self.spawn
self.compiler.mkpath = self.mkpath
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
if not isinstance(libraries, (list, tuple)):
raise distutils.errors.DistutilsSetupError(
"'libraries' options need to be either a list or a tuple.")
for lib in libraries:
if not isinstance(lib, Library):
raise distutils.errors.DistutilsSetupError(
"contents of 'libraries' needs to be instances of 'Library' not " + str(type(lib))
)
# lib.validate()
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for lib in self.libraries:
lib_names.append(lib.name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for lib in self.libraries:
filenames.extend(lib.sources)
return filenames
def build_libraries(self, libraries):
for lib in libraries:
distutils.log.info("building '%s' library", lib.name)
try:
lib.build(self)
except NotImplementedError:
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
include_dirs = lib.include_dirs
objects = self.compiler.compile(
lib.sources,
output_dir=self.build_temp,
macros=lib.macros,
include_dirs=include_dirs,
debug=self.debug
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
objects,
lib.name,
output_dir=self.build_clib,
debug=self.debug
)
| 2.109375 | 2 |
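The long comment in run() above refers to a Library class whose build() method can be overridden to bypass the distutils compiler entirely. A minimal sketch of such a subclass follows; since library_base is not shown here, the base-class attribute names and construction are assumptions.

# Hedged sketch: a Library subclass that takes over its own build step.
from build_framework.library.library_base import Library

class ZlibLibrary(Library):           # hypothetical library
    name = 'zlib'
    sources = ['zlib/adler32.c']      # hypothetical source list

    def build(self, build_cmd):
        # build_cmd is the build_clib command instance; returning here
        # without raising NotImplementedError skips the distutils
        # compiler fallback in build_libraries() above.
        build_cmd.spawn(['make', '-C', 'zlib'])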
gradools/mconfig.py | matthew-brett/gradools | 1 | 12791354 | <filename>gradools/mconfig.py
""" Tools for grading
"""
from os.path import exists, join as pjoin
from collections import OrderedDict
import pytoml as toml
import pandas as pd
class ConfigError(RuntimeError):
pass
class Config:
config_fname = 'gdconfig.toml'
required_fields = ('year',)
default_log = 'marking_log.md'
def __init__(self):
self._params = None
def __getitem__(self, key):
return self.params[key]
def __contains__(self, key):
return key in self.params
def get(self, key, *args, **kwargs):
return self.params.get(key, *args, **kwargs)
@property
def params(self):
if self._params is None:
self._params = self._read_config()
return self._params
def _read_config(self):
fname = self.config_fname
if not exists(fname):
raise ConfigError(
f'There should be a {fname} in the current directory')
with open(fname, 'rb') as fobj:
config = toml.load(fobj)
for field in self.required_fields:
if field not in config:
raise ConfigError(f'{fname} should have "{field}" field')
return config
@property
def marking_log(self):
fn = self.get('log', self.default_log)
if not exists(fn):
raise ConfigError(f'Log {fn} does not exist')
return fn
@property
def year(self):
return self.params['year']
@property
def student_fname(self):
return f'students_{self.year}.csv'
@property
def marks_fname(self):
return f'marks_{self.year}.csv'
@property
def nb_template(self):
template = self.get('notebooks', {}).get('template')
if template is None:
return None
return pjoin(*template.split('/'))
def get_students(self):
if not exists(self.student_fname):
raise ConfigError('Run gdo-mkstable here')
return pd.read_csv(self.student_fname)
@property
def scores(self):
return get_scores(self.marking_log)
@property
def score_lines(self):
return get_score_lines(*self.scores)
CONFIG = Config()
def print_year():
print(CONFIG['year'])
def get_scores(fileish):
if hasattr(fileish, 'read'):
contents = fileish.read()
else:
with open(fileish, 'rt') as fobj:
contents = fobj.read()
lines = contents.splitlines()
state = 'searching'
o_scores = OrderedDict()
e_scores = OrderedDict()
for i, line in enumerate(lines):
line = line.strip()
if line == '':
continue
if state == 'searching':
if line == 'Ordinary maxima:':
state = 'ordinary-scores'
elif state == 'ordinary-scores':
if line == 'Extra maxima:':
state = 'extra-scores'
continue
elif line.startswith('Total'):
break
key, value = proc_line(line)
o_scores[key] = float(value)
elif state == 'extra-scores':
if line.startswith('Total'):
break
key, value = proc_line(line)
e_scores[key] = float(value)
return o_scores, e_scores
def proc_line(line):
if not line.startswith('*'):
raise ValueError('Invalid list element')
return [v.strip() for v in line[1:].split(':')]
def get_score_lines(o_scores, e_scores):
lines = [f'* {k}: {v}' for k, v in o_scores.items()]
if e_scores:
lines.append('')
lines += [f'* {k}: {v}' for k, v in e_scores.items()]
return '\n'.join(lines) + '\n'
| 2.40625 | 2 |
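The marking-log format that get_scores() expects can be seen from the parser's states. A minimal sketch, feeding it an in-memory log (the section names come straight from the state machine above; the keys and values are placeholders):

# Hedged sketch: parsing a tiny marking log with get_scores.
from io import StringIO
from gradools.mconfig import get_scores

log = StringIO(
    "Ordinary maxima:\n"
    "* q1: 2\n"
    "* q2: 3\n"
    "Extra maxima:\n"
    "* bonus: 1\n"
    "Total: 6\n"
)
ordinary, extra = get_scores(log)
# ordinary == OrderedDict([('q1', 2.0), ('q2', 3.0)])
# extra == OrderedDict([('bonus', 1.0)])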
wandb/integration/xgboost/xgboost.py | ayulockin/client | 0 | 12791355 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
xgboost init
"""
import os
import json
import wandb
import warnings
import xgboost as xgb
from typing import cast
from pathlib import Path
MINIMIZE_METRICS = [
"rmse",
"rmsle",
"mae",
"mape",
"mphe",
"logloss",
"error",
"error@t",
"merror",
]
MAXIMIZE_METRICS = ["auc", "aucpr", "ndcg", "map", "ndcg@n", "map@n"]
def wandb_callback():
"""
Old style callback that will be deprecated in favor of WandbCallback. Please try the new logger for more features.
"""
warnings.warn(
"wandb_callback will be deprecated in favor of WandbCallback. Please use WandbCallback for more features.",
UserWarning,
stacklevel=2,
)
def callback(env):
for k, v in env.evaluation_result_list:
wandb.log({k: v}, commit=False)
wandb.log({})
return callback
class WandbCallback(xgb.callback.TrainingCallback):
"""`WandbCallback` automatically integrates XGBoost with wandb.
Arguments:
log_model: (boolean) if True save and upload the model to Weights & Biases Artifacts
log_feature_importance: (boolean) if True log a feature importance bar plot
importance_type: (str) one of {weight, gain, cover, total_gain, total_cover} for tree model. weight for linear model.
define_metric: (boolean) if True (default) capture model performance at the best step, instead of the last step, of training in your `wandb.summary`.
Passing `WandbCallback` to XGBoost will:
- log the booster model configuration to Weights & Biases
- log evaluation metrics collected by XGBoost, such as rmse, accuracy etc. to Weights & Biases
- log training metrics collected by XGBoost (if you provide training data to eval_set)
- log the best score and the best iteration
- save and upload your trained model to Weights & Biases Artifacts (when `log_model = True`)
- log feature importance plot when `log_feature_importance=True` (default).
- Capture the best eval metric in `wandb.summary` when `define_metric=True` (default).
Example:
```python
bst_params = dict(
objective ='reg:squarederror',
colsample_bytree = 0.3,
learning_rate = 0.1,
max_depth = 5,
alpha = 10,
n_estimators = 10,
tree_method = 'hist'
)
xg_reg = xgb.XGBRegressor(**bst_params)
xg_reg.fit(X_train,
y_train,
eval_set=[(X_test, y_test)],
callbacks=[WandbCallback()])
```
"""
def __init__(
self,
log_model: bool = False,
log_feature_importance: bool = True,
importance_type: str = "gain",
define_metric: bool = True,
):
if wandb.run is None:
raise wandb.Error("You must call wandb.init() before WandbCallback()")
self.log_model = log_model
self.log_feature_importance = log_feature_importance
self.importance_type = importance_type
self.define_metric = define_metric
def before_training(self, model):
"""Run before training is finished"""
# Update W&B config
config = model.save_config()
wandb.config.update(json.loads(config))
return model
def after_training(self, model):
"""Run after training is finished."""
# Log the booster model as artifacts
if self.log_model:
self._log_model_as_artifact(model)
# Plot feature importance
if self.log_feature_importance:
self._log_feature_importance(model)
# Log the best score and best iteration
if model.attr("best_score") is not None:
wandb.log(
{
"best_score": float(cast(str, model.attr("best_score"))),
"best_iteration": int(cast(str, model.attr("best_iteration"))),
}
)
return model
def after_iteration(self, model, epoch, evals_log):
"""Run after each iteration. Return True when training should stop."""
# Log metrics
for data, metric in evals_log.items():
for metric_name, log in metric.items():
if self.define_metric:
self._define_metric(data, metric_name)
wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False)
else:
wandb.log({f"{data}-{metric_name}": log[-1]}, commit=False)
wandb.log({"epoch": epoch})
self.define_metric = False
return False
def _log_model_as_artifact(self, model):
model_name = f"{wandb.run.id}_model.json"
model_path = Path(wandb.run.dir) / model_name
model.save_model(str(model_path))
model_artifact = wandb.Artifact(name=model_name, type="model")
model_artifact.add_file(model_path)
wandb.log_artifact(model_artifact)
def _log_feature_importance(self, model):
fi = model.get_score(importance_type=self.importance_type)
fi_data = [[k, fi[k]] for k in fi]
table = wandb.Table(data=fi_data, columns=["Feature", "Importance"])
wandb.log(
{
"Feature Importance": wandb.plot.bar(
table, "Feature", "Importance", title="Feature Importance"
)
}
)
def _define_metric(self, data, metric_name):
if "loss" in str.lower(metric_name):
wandb.define_metric(f"{data}-{metric_name}", summary="min")
elif str.lower(metric_name) in MINIMIZE_METRICS:
wandb.define_metric(f"{data}-{metric_name}", summary="min")
elif str.lower(metric_name) in MAXIMIZE_METRICS:
wandb.define_metric(f"{data}-{metric_name}", summary="max")
else:
pass
| 2.53125 | 3 |
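For completeness, the legacy wandb_callback() above plugs straight into xgb.train's callbacks list. A hedged sketch; the import path is inferred from this file's location, and the data file and parameters are placeholders.

# Hedged sketch: using the legacy callback with xgb.train.
import wandb
import xgboost as xgb
from wandb.integration.xgboost import wandb_callback  # path inferred from this file

wandb.init(project="xgb-demo")            # project name is an assumption
dtrain = xgb.DMatrix("train.libsvm")      # hypothetical training data
booster = xgb.train({"objective": "reg:squarederror"}, dtrain,
                    evals=[(dtrain, "train")],
                    callbacks=[wandb_callback()])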
fix_timestamps.py | molguin92/EdgeDroidResults | 0 | 12791356 | """
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import os
experiments = {
'1 Client': '1Client_IdealBenchmark',
'5 Clients': '5Clients_IdealBenchmark',
'10 Clients': '10Clients_IdealBenchmark'
}
for exp, dir in experiments.items():
os.chdir(dir)
for r in range(1, 6):
os.chdir('run_{}'.format(r))
with open('server_stats.json', 'r') as f:
data = json.load(f)
with open('server_stats.json', 'w') as f:
data['run_start'] = data['run_start'] - 7200000
data['run_end'] = data['run_end'] - 7200000
json.dump(data, f)
os.chdir('..')
os.chdir('..')
| 1.890625 | 2 |
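The 7200000 ms subtracted above is a fixed 2-hour (UTC+2) offset. An equivalent sketch without the nested os.chdir bookkeeping, using pathlib globs; the directory layout is assumed from the experiments dict above.

# Hedged sketch: the same fix as a flat traversal.
import json
from pathlib import Path

UTC_OFFSET_MS = 2 * 60 * 60 * 1000  # the 7200000 above

for stats in Path('.').glob('*_IdealBenchmark/run_*/server_stats.json'):
    data = json.loads(stats.read_text())
    data['run_start'] -= UTC_OFFSET_MS
    data['run_end'] -= UTC_OFFSET_MS
    stats.write_text(json.dumps(data))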
oldp/apps/cases/migrations/0010_case_abstract.py | docsuleman/oldp | 66 | 12791357 | <reponame>docsuleman/oldp<filename>oldp/apps/cases/migrations/0010_case_abstract.py
# Generated by Django 2.1.1 on 2018-09-18 08:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0009_auto_20180430_1225'),
]
operations = [
migrations.AddField(
model_name='case',
name='abstract',
field=models.TextField(blank=True, help_text='Case abstract (Leitsatz) formatted in Legal Markdown', null=True),
),
]
| 1.5 | 2 |
tests/connectors/mock/test_mockdata.py | gvasold/papilotte | 3 | 12791358 | """Test creation of mock data.
"""
import datetime
from papilotte.connectors.mock import mockdata
def test_generate_person():
"Make sure generate_person() doesn not create more than 15 different persons."
num_of_different_objects = 15
generator = mockdata.generate_person(num_of_different_objects)
objects = {}
for _ in range(num_of_different_objects * 10):
obj = next(generator)
buf = objects.get(obj["@id"], [])
buf.append(obj)
objects[obj["@id"]] = buf
for pid in objects:
assert len(objects[pid]) == 10
# make sure persons with same pid contain same data
for pid, objlist in objects.items():
last_obj = None
for obj in objlist:
if last_obj is None:
last_obj = obj
else:
assert last_obj == obj
def test_generate_source():
"Make sure generate_source() does not create more than 15 different sources."
num_of_different_objects = 25
generator = mockdata.generate_source(num_of_different_objects)
objects = {}
for _ in range(num_of_different_objects * 10):
obj = next(generator)
buf = objects.get(obj["@id"], [])
buf.append(obj)
objects[obj["@id"]] = buf
for pid in objects:
assert len(objects[pid]) == 10
# make sure sources with same pid contain same data
for pid, objlist in objects.items():
last_obj = None
for obj in objlist:
if last_obj is None:
last_obj = obj
else:
assert last_obj == obj
def test_generate_statement():
"Make sure generate_statement() works as expected."
factoid = {
"@id": "Factoid 1",
"createdWhen": "2019-07-21",
"createdBy": "User 1",
"modifiedWhen": "2019-10-12",
"modifiedBy": "User 2",
}
generator = mockdata.generate_statement(factoid, 1)
for i in range(5):
stmt = next(generator)
assert stmt["@id"] == "F1S%d" % (i + 1)
assert stmt["createdBy"] == factoid["createdBy"]
assert stmt["createdWhen"] == factoid["createdWhen"]
assert stmt["modifiedBy"] == factoid["modifiedBy"]
assert stmt["modifiedWhen"] == factoid["modifiedWhen"]
def test_generate_factoid():
"""Test the factoid generator.
"""
generator = mockdata.generate_factoid()
for i in range(100):
factoid = next(generator)
assert factoid["@id"] == "Factoid %03d" % (i + 1)
assert "Person" in factoid["person"]["@id"]
assert "Source" in factoid["source"]["@id"]
assert "statement" in factoid
assert factoid["statement"]["@id"] == "F%dS1" % (i + 1)
def test_make_label_objects():
"Make sure simple object consisting of a label and an uri or created as expected."
for counter in (1, 4):
objects = mockdata.make_label_objects(3, "xxx", counter)
for i, obj in enumerate(objects):
assert obj["label"] == "Xxx %d_%d" % (counter, i + 1)
assert obj["uri"] == "http://example.com/xxx/%d/%d" % (counter, i + 1)
def test_make_date():
"Make date generates a dict consisting of a date-label and a date string."
# make_date might return None or an empty dict
assert mockdata.make_date(0) is None
assert mockdata.make_date(1) == {"label": "1801", "sortdate": "1801"}
assert mockdata.make_date(2) == {"label": "February 1802", "sortdate": "1802-02"}
assert mockdata.make_date(3) == {"label": "3 March 1803", "sortdate": "1803-03-03"}
assert mockdata.make_date(5) is None
assert mockdata.make_date(6) == {"label": "1806", "sortdate": "1806"}
assert mockdata.make_date(7) == {"label": "July 1807", "sortdate": "1807-07"}
assert mockdata.make_date(8) == {"label": "8 August 1808", "sortdate": "1808-08-08"}
assert mockdata.make_date(9) == {}
def test_make_date_distribution():
"Check if dates are equally distributed in mockdata."
counter = {}
for i in range(1000):
data = mockdata.make_date(i)
if data is None:
counter["None"] = counter.get("None", 0) + 1
elif data == {}:
counter["empty"] = counter.get("empty", 0) + 1
elif data["sortdate"].count("-") == 0:
counter["yyyy"] = counter.get("yyyy", 0) + 1
elif data["sortdate"].count("-") == 1:
counter["yyyy-mm"] = counter.get("yyyy-mm", 0) + 1
elif data["sortdate"].count("-") == 2:
counter["yyyy-mm-dd"] = counter.get("yyyy-mm-dd", 0) + 1
assert counter["None"] == counter["empty"]
assert counter["None"] == counter["yyyy"]
assert counter["None"] == counter["yyyy-mm"]
assert counter["None"] == counter["yyyy-mm-dd"]
def test_uris():
"Test the mockdata get_uri function."
assert mockdata.get_uris(1) == [
"http://example.com/1",
"http://example.com/2",
"http://example.com/3",
]
assert mockdata.get_uris(2) == [
"http://example.com/1",
"http://example.com/2",
"http://example.com/3",
"http://example.com/4",
"http://example.com/5",
"http://example.com/6",
"http://example.com/7",
"http://example.com/8",
]
assert mockdata.get_uris(3) == [
"http://example.com/1",
"http://example.com/2",
"http://example.com/3",
"http://example.com/4",
"http://example.com/5",
"http://example.com/6",
"http://example.com/7",
"http://example.com/8",
"http://example.com/9",
"http://example.com/10",
"http://example.com/11",
"http://example.com/12",
"http://example.com/13",
"http://example.com/14",
"http://example.com/15",
]
def test_get_modifier_distribution():
"""Check if distribution of modifier names is close to equal and if
there are exactly 3 modifiers.
"""
counter = {}
for i in range(999):
modifier = mockdata.get_modifier(i)
counter[modifier] = counter.get(modifier, 0) + 1
assert counter["Modifier 1"] == counter["Modifier 2"]
assert counter["Modifier 1"] == counter["Modifier 3"]
def test_get_modifer():
"Test creation order of get_modifier()."
assert mockdata.get_modifier(1) == "Modifier 3"
assert mockdata.get_modifier(2) == "Modifier 1"
assert mockdata.get_modifier(3) == "Modifier 2"
assert mockdata.get_modifier(4) == "Modifier 3"
assert mockdata.get_modifier(5) == "Modifier 1"
assert mockdata.get_modifier(6) == "Modifier 2"
def test_get_creator_distribution():
"""Check if distribution of creator names is close to equal and if
there are exactly 3 creators.
"""
counter = {}
for i in range(1000):
modifier = mockdata.get_creator(i)
counter[modifier] = counter.get(modifier, 0) + 1
assert counter["Creator 1"] == counter["Creator 2"]
assert counter["Creator 1"] == counter["Creator 3"]
assert counter["Creator 1"] == counter["Creator 4"]
assert counter["Creator 1"] == counter["Creator 5"]
def test_get_creator():
"Test creation order of get_creator()."
for i in range(1, 6):
assert mockdata.get_creator(i) == "Creator %d" % i
def test_get_datetime():
"Test the mockdata get_date function."
expected = [
"2000-01-01T00:00:00+02:00",
"2000-01-02T10:17:36+02:00",
"2000-01-03T20:35:12+02:00",
"2000-01-05T06:52:48+02:00",
"2000-01-06T17:10:24+02:00",
"2000-01-08T03:28:00+02:00",
"2000-01-09T13:45:36+02:00",
"2000-01-11T00:03:12+02:00",
"2000-01-12T10:20:48+02:00",
"2000-01-13T20:38:24+02:00",
]
base_date = datetime.datetime(2000, 1, 1)
for i in range(10):
assert mockdata.get_datetime(base_date, i) == expected[i]
def test_get_datetime_with_offset():
"Test if getting a date with offset works."
expected = [
"2000-01-01T00:00:00+02:00",
"2000-01-03T08:30:56+02:00",
"2000-01-07T13:28:32+02:00",
"2000-01-13T14:52:48+02:00",
"2000-01-21T12:43:44+02:00",
"2000-01-08T03:28:00+02:00",
"2000-01-15T03:05:36+02:00",
"2000-01-23T23:09:52+02:00",
"2000-02-03T15:40:48+02:00",
"2000-02-16T04:38:24+02:00",
"2000-01-15T06:56:00+02:00",
"2000-01-26T21:40:16+02:00",
"2000-02-09T08:51:12+02:00",
"2000-02-24T16:28:48+02:00",
"2000-03-12T20:33:04+02:00",
"2000-01-22T10:24:00+02:00",
"2000-02-07T16:14:56+02:00",
"2000-02-25T18:32:32+02:00",
"2000-03-16T17:16:48+02:00",
"2000-04-07T12:27:44+02:00",
]
base_date = datetime.datetime(2000, 1, 1)
for i in range(20):
assert mockdata.get_datetime(base_date, i, True) == expected[i]
def test_mod_time_after_creation_time():
"Assert modification cannot be earlier than creation"
base_date = datetime.datetime(2000, 1, 1)
for i in range(1000):
creation_time = mockdata.get_datetime(base_date, i)
modification_time = mockdata.get_datetime(base_date, i, True)
assert creation_time <= modification_time
def test_idempotence():
"Generate a mock data set multiple times and make sure they are identical"
def make_factoids(num):
generated_factoids = []
generator = mockdata.generate_factoid()
for _ in range(num):
generated_factoids.append(next(generator))
return generated_factoids
data_to_compare = make_factoids(250)
for _ in range(10):
assert data_to_compare == make_factoids(250)
def test_make_factoids():
"make_factoids is a convenience function to create test data."
assert len(mockdata.make_factoids(15)) == 15
| 2.90625 | 3 |
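A compact companion test, restating the three get_uris asserts above as the pattern they imply (lengths 3, 8, 15 for n = 1, 2, 3, i.e. n * (n + 2)); this only claims what those asserts already show.

def test_get_uris_length():
    "For the cases asserted above, get_uris(n) returns n * (n + 2) uris."
    for n in (1, 2, 3):
        assert len(mockdata.get_uris(n)) == n * (n + 2)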
sdk/python/pulumi_spotinst/aws/ocean.py | 346/pulumi-spotinst | 1 | 12791359 | <reponame>346/pulumi-spotinst<filename>sdk/python/pulumi_spotinst/aws/ocean.py<gh_stars>1-10
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Ocean(pulumi.CustomResource):
autoscaler: pulumi.Output[dict]
"""
Describes the Ocean Kubernetes autoscaler.
"""
blacklists: pulumi.Output[list]
"""
Instance types not allowed in the Ocean cluster. Cannot be configured if `whitelist` is configured.
"""
controller_id: pulumi.Output[str]
"""
The ocean cluster identifier. Example: `ocean.k8s`
"""
desired_capacity: pulumi.Output[int]
"""
The number of instances to launch and maintain in the cluster.
"""
fallback_to_ondemand: pulumi.Output[bool]
"""
If no Spot instance markets are available, enable Ocean to launch On-Demand instances instead.
"""
iam_instance_profile: pulumi.Output[str]
"""
The instance profile iam role.
"""
image_id: pulumi.Output[str]
"""
ID of the image used to launch the instances.
"""
key_name: pulumi.Output[str]
"""
The key pair to attach the instances.
"""
max_size: pulumi.Output[int]
"""
The upper limit of instances the cluster can scale up to.
"""
min_size: pulumi.Output[int]
"""
The lower limit of instances the cluster can scale down to.
"""
name: pulumi.Output[str]
"""
The cluster name.
"""
region: pulumi.Output[str]
"""
The region the cluster will run in.
"""
security_groups: pulumi.Output[list]
"""
One or more security group ids.
"""
spot_percentage: pulumi.Output[float]
"""
The percentage of Spot instances the cluster should maintain. Min 0, max 100.
"""
subnet_ids: pulumi.Output[list]
"""
A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto assign public ip.
"""
tags: pulumi.Output[list]
"""
Optionally adds tags to instances launched in an Ocean cluster.
"""
user_data: pulumi.Output[str]
"""
Base64-encoded MIME user data to make available to the instances.
"""
utilize_reserved_instances: pulumi.Output[bool]
"""
If Reserved instances exist, Ocean will utilize them before launching Spot instances.
"""
whitelists: pulumi.Output[list]
"""
Instance types allowed in the Ocean cluster. Cannot be configured if `blacklist` is configured.
"""
def __init__(__self__, resource_name, opts=None, autoscaler=None, blacklists=None, controller_id=None, desired_capacity=None, fallback_to_ondemand=None, iam_instance_profile=None, image_id=None, key_name=None, max_size=None, min_size=None, name=None, region=None, security_groups=None, spot_percentage=None, subnet_ids=None, tags=None, user_data=None, utilize_reserved_instances=None, whitelists=None, __name__=None, __opts__=None):
"""
Provides a Spotinst Ocean AWS resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[dict] autoscaler: Describes the Ocean Kubernetes autoscaler.
:param pulumi.Input[list] blacklists: Instance types not allowed in the Ocean cluster. Cannot be configured if `whitelist` is configured.
:param pulumi.Input[str] controller_id: The ocean cluster identifier. Example: `ocean.k8s`
:param pulumi.Input[int] desired_capacity: The number of instances to launch and maintain in the cluster.
:param pulumi.Input[bool] fallback_to_ondemand: If no Spot instance markets are available, enable Ocean to launch On-Demand instances instead.
:param pulumi.Input[str] iam_instance_profile: The instance profile iam role.
:param pulumi.Input[str] image_id: ID of the image used to launch the instances.
:param pulumi.Input[str] key_name: The key pair to attach the instances.
:param pulumi.Input[int] max_size: The upper limit of instances the cluster can scale up to.
:param pulumi.Input[int] min_size: The lower limit of instances the cluster can scale down to.
:param pulumi.Input[str] name: The cluster name.
:param pulumi.Input[str] region: The region the cluster will run in.
:param pulumi.Input[list] security_groups: One or more security group ids.
:param pulumi.Input[float] spot_percentage: The percentage of Spot instances the cluster should maintain. Min 0, max 100.
:param pulumi.Input[list] subnet_ids: A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto assign public ip.
:param pulumi.Input[list] tags: Optionally adds tags to instances launched in an Ocean cluster.
:param pulumi.Input[str] user_data: Base64-encoded MIME user data to make available to the instances.
:param pulumi.Input[bool] utilize_reserved_instances: If Reserved instances exist, Ocean will utilize them before launching Spot instances.
:param pulumi.Input[list] whitelists: Instance types allowed in the Ocean cluster. Cannot be configured if `blacklist` is configured.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['autoscaler'] = autoscaler
__props__['blacklists'] = blacklists
__props__['controller_id'] = controller_id
__props__['desired_capacity'] = desired_capacity
__props__['fallback_to_ondemand'] = fallback_to_ondemand
__props__['iam_instance_profile'] = iam_instance_profile
__props__['image_id'] = image_id
__props__['key_name'] = key_name
__props__['max_size'] = max_size
__props__['min_size'] = min_size
__props__['name'] = name
__props__['region'] = region
if security_groups is None:
raise TypeError('Missing required property security_groups')
__props__['security_groups'] = security_groups
__props__['spot_percentage'] = spot_percentage
if subnet_ids is None:
raise TypeError('Missing required property subnet_ids')
__props__['subnet_ids'] = subnet_ids
__props__['tags'] = tags
__props__['user_data'] = user_data
__props__['utilize_reserved_instances'] = utilize_reserved_instances
__props__['whitelists'] = whitelists
super(Ocean, __self__).__init__(
'spotinst:aws/ocean:Ocean',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 1.9375 | 2 |
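Typical instantiation of the resource above; every identifier is a placeholder, only security_groups and subnet_ids are required by the constructor, and the module path assumes the package re-exports Ocean from pulumi_spotinst.aws.

# Hedged sketch: creating an Ocean cluster in a Pulumi program.
import pulumi_spotinst as spotinst

cluster = spotinst.aws.Ocean(
    "example-ocean",
    controller_id="ocean.k8s",
    region="us-west-2",                          # placeholder
    security_groups=["sg-0123456789abcdef0"],    # required
    subnet_ids=["subnet-0123456789abcdef0"],     # required
    image_id="ami-0123456789abcdef0",            # placeholder
    min_size=0,
    max_size=10,
)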
init.py | yuryyu/SmartHome | 0 | 12791360 | <filename>init.py
# configuration module
import socket
nb=1 # 0- HIT-"192.168.3.11", 1 - open HiveMQ - broker.hivemq.com
brokers=[str(socket.gethostbyname('vmm1.saaintertrade.com')), str(socket.gethostbyname('broker.hivemq.com')),"18.194.176.210"]
ports=['80','1883','1883']
usernames = ['','',''] # should be modified for HIT
passwords = ['','',''] # should be modified for HIT
broker_ip=brokers[nb]
port=ports[nb]
username = usernames[nb]
password = passwords[nb]
conn_time = 0 # 0 stands for endless
# mzs=['matzi/','']
# sub_topics =[mzs[nb]+'#','#']
# pub_topics = [mzs[nb]+'test', 'test']
# ext_man = mzs[nb]+'system/command'
# sub_topic = [mzs[nb]+'bearer/accel/status', mzs[nb]+'bearer/belt/status']
# pub_topic = mzs[nb]+'system/state'
msg_system = ['normal', 'issue','No issue']
wait_time = 5
broker_ip=brokers[nb]
broker_port=ports[nb]
username = usernames[nb]
password = passwords[nb]
# sub_topic = sub_topics[nb]
# pub_topic = pub_topics[nb]
# Common
conn_time = 0 # 0 stands for endless loop
comm_topic = 'pr/Smart/'
#comm_topic = 'pr/Smart/Home/'
# FFT module init data
isplot = False
issave = False
# DSP init data
percen_thr=0.05 # 5% of max energy holds
Fs = 2048.0
deviation_percentage = 10
max_eucl = 0.5
# Acq init data
acqtime = 60.0 # sec
manag_time = 10 # sec
# DB init data
db_name = 'data\\homedata_05_2.db' # SQLite
db_init = False #False # True if we need reinit smart home setup
# Meter consumption limits
Water_max=0.02
Elec_max=1.8 | 2.03125 | 2 |
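This module only defines settings; a hedged sketch of how another module might consume them with paho-mqtt (which init.py itself does not import). The ports list holds strings, hence the int() cast.

# Hedged sketch: connecting with the settings above via paho-mqtt.
import paho.mqtt.client as mqtt
import init

client = mqtt.Client()
if init.username:
    client.username_pw_set(init.username, init.password)
client.connect(init.broker_ip, int(init.broker_port), keepalive=60)
client.subscribe(init.comm_topic + '#')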
GetBulletChat.py | Hopejoyce/python_game | 1 | 12791361 | from bs4 import BeautifulSoup
import time
import pandas as pd
import requests
import datetime
headers={
"User-Agent":"",
"Connection": "keep-alive",
# how to obtain this cookie is explained in the documentation
"Cookie":""
}
sets=124 # episode number of the latest installment
dates=[] # list of dates, used to fill in the url
# iterate over the dates, inclusive of begin and end, producing dates formatted like 2020-05-03
begin = datetime.date(2020,5,3)
end = datetime.date(2020,6,9)
d = begin
delta = datetime.timedelta(days=1)
while d <= end:
dates.append(str(d.strftime("%Y-%m-%d")))
d += delta
Cids=[] # list of cids, used to fill in the url
with open('Urls/Cid.txt', 'r') as f:
for line in f.readlines():
Cids.append(line.strip())
for cid in Cids:
# reset these containers for every video
dm_data = [] # danmaku (bullet-chat) metadata
dm_text = [] # danmaku text
# the eight danmaku attributes plus the danmaku text itself
DM_time = []
DM_mode = []
DM_font = []
DM_color = []
DM_realTime = []
DM_pool = []
DM_userID = []
DM_id = []
DM_text = []
print("正在爬取第" + str(sets) + "期的《睡前消息》弹幕...")
for date in dates:
url="https://api.bilibili.com/x/v2/dm/history?type=1&oid="+cid+"&date="+date
html=requests.get(url=url,headers=headers) # returns the response text
html.encoding='utf8'
soup=BeautifulSoup(html.text,'lxml') # build the soup object
all=soup.find_all("d")
for d in all:
# danmaku metadata
dm_data.append(str(d.get("p")).split(","))
# danmaku text
dm_text.append(d.get_text())
# distribute the fields into their respective arrays
for i in dm_data:
DM_time.append(i[0])
DM_mode.append(i[1])
DM_font.append(i[2])
DM_color.append(i[3])
DM_realTime.append(i[4])
DM_pool.append(i[5])
DM_userID.append(i[6])
DM_id.append(i[7])
for i in dm_text:
DM_text.append(i)
dt={"DM_time":DM_time,"DM_mode":DM_mode,"DM_font":DM_font,"DM_color":DM_color,
"DM_realTime":DM_realTime,"DM_pool":DM_pool,"DM_userID":DM_userID,"DM_id":DM_id,"DM_text":DM_text}
d=pd.DataFrame(dt)
d.to_csv('./Danmu/Danmu-'+str(sets)+'.csv',encoding='utf-8-sig') # save the danmaku data
print("Danmaku saved to Danmu-" + str(sets) + ".csv")
sets-=1
# sleep 7 seconds after finishing each page
print("Buffering...")
time.sleep(7)
print("Finished crawling danmaku for episodes 110-124 of 睡前消息")
| 2.90625 | 3 |
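For reference, the eight comma-separated fields collected from each <d> element's "p" attribute, in the order used above; the field meanings follow common Bilibili conventions and are an assumption.

# Hedged sketch: unpacking one "p" attribute (values are made up).
raw_p = "12.5,1,25,16777215,1590000000,0,abcd1234,123456789"
(video_time, mode, font_size, color,
 real_time, pool, user_hash, dm_id) = raw_p.split(",")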
run_ogs5py_scripts.py | GeoStat-Framework/ogs5py_benchmarks | 3 | 12791362 | # -*- coding: utf-8 -*-
"""
run all ogs5py benchmarks
"""
import sys
import os
import fnmatch
import time
from pexpect.popen_spawn import PopenSpawn
import pexpect
from ogs5py.tools.tools import Output
# pexpect.spawn just runs on unix-like systems
if sys.platform == "win32":
CmdRun = PopenSpawn
else:
CmdRun = pexpect.spawn
def call_script(script, output, timeout=3):
cwd, script_file = os.path.split(script)
args = [sys.executable, "-u", script_file]
try:
child = CmdRun(
" ".join(args), timeout=timeout, logfile=output, cwd=cwd
)
# wait for ogs to finish
child.expect(pexpect.EOF)
except pexpect.TIMEOUT:
output.write("...timeout\n".encode())
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
if __name__ == "__main__":
timeout = 3 # None for no timeout
out_dir = os.path.join(os.getcwd(), "benchmarks")
# out_dir = os.path.join(os.getcwd(), "benchmarks_FEM_active")
scripts = find("*.py", out_dir)
log_name = os.path.join(
out_dir, "run_log_" + time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
)
output = Output(log_name, print_log=True)
for script in scripts:
print(script)
call_script(script, output, timeout=timeout)
output.close()
| 2.1875 | 2 |
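The same helpers can run a single benchmark outside the __main__ block, mirroring the Output/call_script usage above; the script path below is a placeholder.

# Hedged sketch: one-off run with its own log file.
out = Output("single_run_log.txt", print_log=True)
call_script("benchmarks/example/script.py", out, timeout=60)  # hypothetical path
out.close()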
src/libspec/arm/scripts/ast/nodes.py | agustingianni/retools | 80 | 12791363 | <filename>src/libspec/arm/scripts/ast/nodes.py<gh_stars>10-100
class BaseNode(object):
def accept(self, visitor):
return visitor.accept(self)
class BooleanValue(BaseNode):
def __init__(self, value):
self.value = value
def __str__(self):
return str("true" if self.value else "false")
class Identifier(BaseNode):
def __init__(self, name):
self.name = name
def __str__(self):
return str(self.name)
class NumberValue(BaseNode):
def __init__(self, value, bit_size=32):
self.value = value
self.bit_size = bit_size
def __len__(self):
"""
Return the bitsize of the number. Important for things like
the concatenation operator.
"""
return self.bit_size
def __str__(self):
return str(self.value)
class List(BaseNode):
def __init__(self, values):
self.values = values
def __len__(self):
return len(self.values)
def __str__(self):
return "(%s)" % ", ".join(map(str, self.values))
class Enumeration(BaseNode):
def __init__(self, values):
self.values = values
def __len__(self):
return len(self.values)
def __str__(self):
return "{%s}" % ", ".join(map(str, self.values))
class UnaryExpression(BaseNode):
def __init__(self, type_, expr):
self.type = type_
self.expr = expr
def __str__(self):
return "%s%s" % (str(self.type), str(self.expr))
class BinaryExpression(BaseNode):
def __init__(self, type_, left_expr, right_expr):
self.type = type_
self.left_expr = left_expr
self.right_expr = right_expr
def __str__(self):
return "%s %s %s" % (str(self.type), str(self.left_expr), str(self.right_expr))
class ProcedureCall(BaseNode):
def __init__(self, name_, arguments):
self.name = name_
self.arguments = arguments
def __str__(self):
return "%s(%s)" % (str(self.name), ", ".join(map(str, self.arguments)))
class RepeatUntil(BaseNode):
def __init__(self, statements, condition):
self.statements = statements
self.condition = condition
def __str__(self):
return "RepeatUntil: %s %s" % (str(self.statements), str(self.condition))
class While(BaseNode):
def __init__(self, condition, statements):
self.condition = condition
self.statements = statements
def __str__(self):
return "While: %s %s" % (str(self.condition), str(self.statements))
class For(BaseNode):
def __init__(self, from_, to, statements):
self.from_ = from_
self.to = to
self.statements = statements
def __str__(self):
return "For: %s %s %s" % (str(self.from_), str(self.to), str(self.statements))
class If(BaseNode):
def __init__(self, condition, if_statements, else_statements):
self.condition = condition
self.if_statements = if_statements
self.else_statements = else_statements
def __str__(self):
return "If: %s %s %s" % (str(self.condition), map(str, self.if_statements), map(str, self.else_statements))
class BitExtraction(BaseNode):
def __init__(self, identifier_, range_):
self.identifier = identifier_
self.range = range_
def __str__(self):
return "BitExtraction: %s %s" % (str(self.identifier), str(self.range))
class ArrayAccess(BaseNode):
def __init__(self, name, expr1, expr2, expr3):
self.name = name
self.expr1 = expr1
self.expr2 = expr2
self.expr3 = expr3
def __str__(self):
args = [str(self.expr1)]
if self.expr2:
args.append(str(self.expr2))
if self.expr3:
args.append(str(self.expr3))
return "ArrayAccess: %s[%s]" % (str(self.name), " ".join(args))
class MaskedBinary(BaseNode):
def __init__(self, value):
self.value = value
def __str__(self):
return "MaskedBinary: %s" % (str(self.value))
class Ignore(BaseNode):
def __str__(self):
return "Ignore"
class IfExpression(BaseNode):
def __init__(self, condition, trueValue, falseValue):
self.condition = condition
self.trueValue = trueValue
self.falseValue = falseValue
def __str__(self):
return "IfExpression: %s %s %s" % (str(self.condition), str(self.trueValue), str(self.falseValue))
class CaseElement(BaseNode):
def __init__(self, value, statements):
self.value = value
self.statements = statements
def __str__(self):
return "CaseElement: %s %s" % (str(self.value), str(self.statements))
class Case(BaseNode):
def __init__(self, expr, cases):
self.expr = expr
self.cases = cases
def __str__(self):
return "Case: %s %s" % (str(self.expr), str(self.cases))
class Undefined(BaseNode):
def __init__(self):
self.reason = ""
def __str__(self):
return "Undefined"
class Unpredictable(BaseNode):
def __init__(self):
self.reason = ""
def __str__(self):
return "Unpredictable"
class See(BaseNode):
def __init__(self, msg):
self.msg = msg.strip('"')
def __str__(self):
return "See: %s" % (str(self.msg))
class ImplementationDefined(BaseNode):
def __str__(self):
return "ImplementationDefined"
class SubArchitectureDefined(BaseNode):
def __str__(self):
return "SubArchitectureDefined"
class Return(BaseNode):
def __init__(self, value):
self.value = value
def __str__(self):
return "Return: %s" % (str(self.value))
| 2.859375 | 3 |
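Since BaseNode.accept() simply dispatches to visitor.accept(node), a visitor here is any object exposing an accept(node) method. A minimal sketch using only the classes above:

# Hedged sketch: a tiny visitor that measures expression depth.
class DepthVisitor(object):
    def accept(self, node):
        if isinstance(node, BinaryExpression):
            return 1 + max(node.left_expr.accept(self),
                           node.right_expr.accept(self))
        return 1

expr = BinaryExpression("+", NumberValue(1), NumberValue(2))
print(expr.accept(DepthVisitor()))  # -> 2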
image_styles/views.py | fotorius/django-image-styles | 0 | 12791364 | from django.shortcuts import render, HttpResponse, get_object_or_404
from django.http import Http404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.urls import reverse,reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
import mimetypes
from .models import Style
from .forms import EffectForm,StyleForm
from .utils import get_effect_form_class,render_image
class RenderImageView(View):
def get(self,request,style_name,path):
image = render_image(style_name,path)
content_type = mimetypes.guess_type(image.image.path)
f = open(image.image.path,'rb')
r = HttpResponse(f,content_type=content_type[0])
f.close()
return r
class ModalForm(FormView):
template_name = 'image_styles/modal_form.html'
submit_button = _('Save')
delete_button = ''
title = _('Create')
action = '.'
def get_action(self):
return self.action
def get_submit_button(self):
return self.submit_button
def get_delete_button(self):
return self.delete_button
def get_title(self):
return self.title
def get_context_data(self,**kwargs):
context = super().get_context_data(**kwargs)
context['action'] = self.get_action()
context['submit_button'] = self.get_submit_button()
context['delete_button'] = self.get_delete_button()
context['title'] = self.get_title()
return context
class EffectFormMixin:
effect = None
style = None
title = _('Create Effect')
submit_button = _('Create')
def dispatch(self,request,*args,**kwargs):
self.effect_name = self.kwargs.get('effect_name')
style_id = self.kwargs.get('style_id')
if style_id:
self.style = get_object_or_404(Style,id=style_id)
effect_id = self.kwargs.get('effect_id')
if effect_id and self.effect_name:
from image_styles import models
self.effect = get_object_or_404(getattr(models,self.effect_name),id=effect_id)
return super().dispatch(request,*args,**kwargs)
def get_form_class(self):
form_class = get_effect_form_class(self.effect_name)
if form_class:
return form_class
raise Http404("Not Found")
def get_form_kwargs(self,*args,**kwargs):
data = super().get_form_kwargs(*args,**kwargs)
if self.effect:
data['instance'] = self.effect
return data
def get_submit_button(self):
if self.effect:
return _('Update')
return super().get_submit_button()
def get_title(self):
if self.effect:
return _('Update Effect')
return super().get_title()
def get_action(self):
if self.style:
return reverse(
'image_styles:effect_create',
kwargs={'style_id':self.style.id,'effect_name':self.effect_name}
)
return reverse(
'image_styles:effect_update',
kwargs={'effect_id':self.effect.id,'effect_name':self.effect_name}  # dispatch() reads 'effect_id'
)
def form_valid(self,form):
form.save()
return HttpResponse(_('Effect Created!'))
def delete(self,*args,**kwargs):
if self.effect:
self.effect.delete()
return HttpResponse(_('Effect Removed!'))
return HttpResponse(_('Delete failed!'))
class StyleFormMixin:
style = None
form_class = StyleForm
def dispatch(self,request,*args,**kwargs):
style_id = self.kwargs.get('style_id')
if style_id:
self.style = get_object_or_404(Style,id=style_id)
self.delete_button = _('Delete')
return super().dispatch(request,*args,**kwargs)
def get_form_kwargs(self,*args,**kwargs):
data = super().get_form_kwargs(*args,**kwargs)
if self.style:
data['instance'] = self.style
return data
def get_action(self):
if self.style:
return reverse(
'image_styles:style_update',
kwargs={'style_id':self.style.id}
)
return reverse('image_styles:style_create')
def get_submit_button(self):
if self.style:
return _('Update')
return super().get_submit_button()
def get_title(self):
if self.style:
return _('Update Style')
return super().get_title()
def form_valid(self,form):
form.save()
return HttpResponse(_('Style Created!'))
def delete(self,*args,**kwargs):
if self.style:
self.style.delete()
return HttpResponse(_('Style Removed!'))
return HttpResponse(_('Delete failed!'))
@method_decorator(staff_member_required(),name='dispatch')
class ManageImageStylesView(TemplateView):
template_name = 'image_styles/home.html'
def get_image_styles(self):
ims = []
for s in Style.objects.all():
effects = s.get_effects()
for i in range(len(effects)):
form = get_effect_form_class(effect_model=effects[i]['object'])
if form:
effects[i]['form'] = form(instance=effects[i]['object'])
effects[i]['action'] = reverse(
'image_styles:effect_update',
kwargs = {
'effect_id':effects[i]['object'].id,
'effect_name':effects[i]['object'].get_name()
}
)
ims.append({
'style':s,
'effects':effects,
})
return ims
def get_context_data(self,**kwargs):
context = super().get_context_data(**kwargs)
context['styles'] = self.get_image_styles()
return context
@method_decorator(staff_member_required(),name='dispatch')
class EffectCreateInitView(ModalForm):
form_class = EffectForm
submit_button = _('Next')
title = _('Select Effect')
def dispatch(self,request,*args,**kwargs):
self.style = get_object_or_404(Style,id=self.kwargs.get('style_id'))
return super().dispatch(request,*args,**kwargs)
def get_form(self,**kwargs):
form = super().get_form(**kwargs)
form.initial['style'] = self.style
return form
def get_submit_button(self):
if self.form_class != EffectForm:
return _('Create')
return super().get_submit_button()
def get_title(self):
if self.form_class != EffectForm:
return _('Create Effect')
return super().get_title()
def get_action(self):
if self.action == '.':
return reverse('image_styles:effect_create_init',kwargs={'style_id':self.style.id})
return self.action
def form_valid(self,form):
effect_name = form.cleaned_data.get('effect')
self.form_class = get_effect_form_class(effect_name=effect_name)
self.action = reverse(
'image_styles:effect_create',
kwargs={'style_id':self.style.id,'effect_name':effect_name}
)
self.request.method = 'GET'
return super().get(self.request,style_id=self.style.id)
@method_decorator(staff_member_required(),name='dispatch')
class EffectCreateView(EffectFormMixin,ModalForm):
title = _('Create Effect')
submit_button = _('Create')
def get_form(self,**kwargs):
form = super().get_form(**kwargs)
form.initial['style'] = self.style
return form
@method_decorator(staff_member_required(),name='dispatch')
class EffectUpdateView(EffectFormMixin,ModalForm):
pass
@method_decorator(staff_member_required(),name='dispatch')
class StyleView(StyleFormMixin,ModalForm):
pass
| 1.9375 | 2 |
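The reverse() names used above imply a URLconf roughly like the following; this is a reconstruction from the view kwargs, and the real project's patterns may differ.

# Hedged sketch of the implied image_styles/urls.py.
from django.urls import path, re_path
from . import views

app_name = 'image_styles'
urlpatterns = [
    path('', views.ManageImageStylesView.as_view(), name='manage'),
    path('style/', views.StyleView.as_view(), name='style_create'),
    path('style/<int:style_id>/', views.StyleView.as_view(), name='style_update'),
    path('style/<int:style_id>/effect/', views.EffectCreateInitView.as_view(),
         name='effect_create_init'),
    path('style/<int:style_id>/effect/<str:effect_name>/',
         views.EffectCreateView.as_view(), name='effect_create'),
    path('effect/<str:effect_name>/<int:effect_id>/',
         views.EffectUpdateView.as_view(), name='effect_update'),
    re_path(r'^(?P<style_name>[\w-]+)/(?P<path>.*)$',
            views.RenderImageView.as_view(), name='render'),
]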
python/pynamics_examples/parallel_five_bar_jumper_foot.py | zmpatel19/Foldable-Robotics | 2 | 12791365 | <filename>python/pynamics_examples/parallel_five_bar_jumper_foot.py
# -*- coding: utf-8 -*-
"""
Written by <NAME>
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.frame import Frame
from pynamics.variable_types import Differentiable,Constant
from pynamics.system import System
from pynamics.body import Body
from pynamics.dyadic import Dyadic
from pynamics.output import Output,PointsOutput
from pynamics.particle import Particle
import pynamics.integration
from pynamics.constraint import KinematicConstraint,AccelerationConstraint
import sympy
import numpy
import matplotlib.pyplot as plt
plt.ion()
from math import pi
system = System()
pynamics.set_system(__name__,system)
tol=1e-5
lO = Constant(.5,'lO',system)
lA = Constant(.75,'lA',system)
lB = Constant(1,'lB',system)
lC = Constant(.75,'lC',system)
lD = Constant(1,'lD',system)
lE = Constant(1,'lE',system)
mO = Constant(2,'mO',system)
mA = Constant(.1,'mA',system)
mB = Constant(.1,'mB',system)
mC = Constant(.1,'mC',system)
mD = Constant(.1,'mD',system)
mE = Constant(.1,'mE',system)
I_main = Constant(1,'I_main',system)
I_leg = Constant(.1,'I_leg',system)
g = Constant(9.81,'g',system)
b = Constant(1e0,'b',system)
k = Constant(1e2,'k',system)
k_ankle = Constant(1e3,'k_ankle',system)
b_ankle = Constant(1e1,'b_ankle',system)
stall_torque = Constant(2e2,'stall_torque',system)
k_constraint = Constant(1e4,'k_constraint',system)
b_constraint = Constant(1e2,'b_constraint',system)
tinitial = 0
tfinal = 10
tstep = 1/30
t = numpy.r_[tinitial:tfinal:tstep]
preload1 = Constant(0*pi/180,'preload1',system)
preload2 = Constant(0*pi/180,'preload2',system)
preload3 = Constant(-180*pi/180,'preload3',system)
preload4 = Constant(0*pi/180,'preload4',system)
preload5 = Constant(180*pi/180,'preload5',system)
preload6 = Constant(0*pi/180,'preload6',system)
x,x_d,x_dd = Differentiable('x',system)
y,y_d,y_dd = Differentiable('y',system)
qO,qO_d,qO_dd = Differentiable('qO',system)
qA,qA_d,qA_dd = Differentiable('qA',system)
qB,qB_d,qB_dd = Differentiable('qB',system)
qC,qC_d,qC_dd = Differentiable('qC',system)
qD,qD_d,qD_dd = Differentiable('qD',system)
qE,qE_d,qE_dd = Differentiable('qE',system)
initialvalues={
x: 0,
x_d: .5,
y: 2,
y_d: 0,
qO: 5*pi/180,
qO_d: 0,
qA: -0.89,
qA_d: 0,
qB: -2.64,
qB_d: 0,
qC: -pi+0.89,
qC_d: 0,
qD: -pi+2.64,
qD_d: 0,
qE: 0,
qE_d: 0,
}
statevariables = system.get_state_variables()
ini0 = [initialvalues[item] for item in statevariables]
N = Frame('N',system)
O = Frame('O',system)
A = Frame('A',system)
B = Frame('B',system)
C = Frame('C',system)
D = Frame('D',system)
E = Frame('E',system)
system.set_newtonian(N)
O.rotate_fixed_axis(N,[0,0,1],qO,system)
A.rotate_fixed_axis(N,[0,0,1],qA,system)
B.rotate_fixed_axis(N,[0,0,1],qB,system)
C.rotate_fixed_axis(N,[0,0,1],qC,system)
D.rotate_fixed_axis(N,[0,0,1],qD,system)
E.rotate_fixed_axis(N,[0,0,1],qE,system)
pOrigin = 0*N.x+0*N.y
pOcm=x*N.x+y*N.y
pOA = pOcm+lO/2*O.x
pOC = pOcm-lO/2*O.x
pAB = pOA+lA*A.x
pBtip = pAB + lB*B.x
#vBtip = pBtip.time_derivative(N,system)
pCD = pOC + lC*C.x
pDtip = pCD + lD*D.x
points = [pDtip,pCD,pOC,pOA,pAB,pBtip]
eqs = []
eqs.append((pBtip-pDtip).dot(N.x))
eqs.append((pBtip-pDtip).dot(N.y))
constraint_system=KinematicConstraint(eqs)
variables = [qO, qA, qB, qC, qD]
guess = [initialvalues[item] for item in variables]
result = constraint_system.solve_numeric(variables,guess,system.constant_values)
ini = []
for item in system.get_state_variables():
if item in variables:
ini.append(result[item])
else:
ini.append(initialvalues[item])
points = PointsOutput(points, constant_values=system.constant_values)
points.calc(numpy.array([ini0,ini]),[0,1])
points.plot_time()
pAcm=pOA+lA/2*A.x
pBcm=pAB+lB/2*B.x
pCcm=pOC+lC/2*C.x
pDcm=pCD+lD/2*D.x
pEcm=pBtip -.1*E.y
pE1 = pEcm+lE/2*E.x
vE1 = pE1.time_derivative(N,system)
pE2 = pEcm-lE/2*E.x
vE2 = pE2.time_derivative(N,system)
wOA = O.get_w_to(A)
wAB = A.get_w_to(B)
wOC = O.get_w_to(C)
wCD = C.get_w_to(D)
wBD = B.get_w_to(D)
wOE = O.get_w_to(E)
BodyO = Body('BodyO',O,pOcm,mO,Dyadic.build(O,I_main,I_main,I_main),system)
#BodyA = Body('BodyA',A,pAcm,mA,Dyadic.build(A,I_leg,I_leg,I_leg),system)
#BodyB = Body('BodyB',B,pBcm,mB,Dyadic.build(B,I_leg,I_leg,I_leg),system)
#BodyC = Body('BodyC',C,pCcm,mC,Dyadic.build(C,I_leg,I_leg,I_leg),system)
#BodyD = Body('BodyD',D,pDcm,mD,Dyadic.build(D,I_leg,I_leg,I_leg),system)
BodyE = Body('BodyE',E,pEcm,mE,Dyadic.build(D,I_leg,I_leg,I_leg),system)
ParticleA = Particle(pAcm,mA,'ParticleA')
ParticleB = Particle(pBcm,mB,'ParticleB')
ParticleC = Particle(pCcm,mC,'ParticleC')
ParticleD = Particle(pDcm,mD,'ParticleD')
#ParticleE = Particle(pEcm,mE,'ParticleE')
system.addforce(-b*wOA,wOA)
system.addforce(-b*wAB,wAB)
system.addforce(-b*wOC,wOC)
system.addforce(-b*wCD,wCD)
system.addforce(-b_ankle*wOE,wOE)
#
stretch1 = -pE1.dot(N.y)
stretch1_s = (stretch1+abs(stretch1))
on = stretch1_s/(2*stretch1+1e-10)
system.add_spring_force1(k_constraint,-stretch1_s*N.y,vE1)
system.addforce(-b_constraint*vE1*on,vE1)
toeforce = k_constraint*-stretch1_s
stretch2 = -pE2.dot(N.y)
stretch2_s = (stretch2+abs(stretch2))
on = stretch2_s/(2*stretch2+1e-10)
system.add_spring_force1(k_constraint,-stretch2_s*N.y,vE2)
system.addforce(-b_constraint*vE2*on,vE2)
heelforce = k_constraint*-stretch2_s
system.add_spring_force1(k,(qA-qO-preload1)*N.z,wOA)
system.add_spring_force1(k,(qB-qA-preload2)*N.z,wAB)
system.add_spring_force1(k,(qC-qO-preload3)*N.z,wOC)
system.add_spring_force1(k,(qD-qC-preload4)*N.z,wCD)
system.add_spring_force1(k,(qD-qB-preload5)*N.z,wBD)
system.add_spring_force1(k_ankle,(qE-qO-preload6)*N.z,wOE)
system.addforcegravity(-g*N.y)
import pynamics.time_series
x = [0,5,5,7,7,9,9,10]
y = [0,0,1,1,-1,-1,0,0]
my_signal, ft2 = pynamics.time_series.build_smoothed_time_signal(x,y,t,'my_signal',window_time_width = .1)
torque = my_signal*stall_torque
system.addforce(torque*O.z,wOA)
system.addforce(-torque*O.z,wOC)
#
eq = []
eq.append(pBtip-pDtip)
eq_d= [item.time_derivative() for item in eq]
eq_dd= [item.time_derivative() for item in eq_d]
eq_dd_scalar = []
eq_dd_scalar.append(eq_dd[0].dot(N.x))
eq_dd_scalar.append(eq_dd[0].dot(N.y))
c = AccelerationConstraint(eq_dd_scalar)
# c.linearize(0)
system.add_constraint(c)
#
f,ma = system.getdynamics()
func1 = system.state_space_post_invert(f,ma,constants = system.constant_values,variable_functions = {my_signal:ft2})
states=pynamics.integration.integrate_odeint(func1,ini,t,rtol=tol,atol=tol)
KE = system.get_KE()
PE = system.getPEGravity(0*N.x) - system.getPESprings()
energy = Output([KE-PE,toeforce,heelforce])
energy.calc(states,t)
energy.plot_time()
#torque_plot = Output([torque])
#torque_plot.calc(states,t)
#torque_plot.plot_time()
points = [pDtip,pCD,pOC,pOA,pAB,pBtip,pE1,pE2,pBtip]
points = PointsOutput(points)
y = points.calc(states,t)
y = y.reshape((-1,9,2))
plt.figure()
for item in y[::30]:
plt.plot(*(item.T))
#points.animate(fps = 30, movie_name='parallel_five_bar_jumper_foot.mp4',lw=2)
| 2.671875 | 3 |
Comparison_fun_2.py | MarianaCandamil/Kocolatl | 0 | 12791366 | <filename>Comparison_fun_2.py
#==== Libraries used ==========================================================
# data handling
#import pandas as pd
# multiple data frames
#from pathlib import Path
# library for working with operating-system options
import os
#== Helper functions ==========================================================
def match_name(file,loc_result,energy=False):
if os.path.exists(loc_result)==False:
os.mkdir(loc_result)
if type(file)==str:
loc_origen=loc_result+file.split('/')[1]
else:
loc_origen=loc_result+file[0].split('/')[1]
if os.path.exists(loc_origen)==False:
os.mkdir(loc_origen)
if energy==2:
type_energy=['H','M','L']
n_folder=[]
for j in range(3):
Loc_final=loc_origen+'/'+type_energy[j]
if os.path.exists(Loc_final)==False:
os.mkdir(Loc_final)
if type(file)==str:
Folder=Loc_final+'/match_'+file.split('/')[2].replace('.csv','')
names=file.split('/')[2].replace('.csv','')
n_folder.append(Folder)
else:
Folder,names=list_file(file,Loc_final+'/')
n_folder=n_folder+Folder
else:
if type(file)==str:
n_folder=loc_origen+'/match_'+file.split('/')[2].replace('.csv','')
names=file.split('/')[2].replace('.csv','')
else:
n_folder,names=list_file(file,loc_origen+'/')
return n_folder
def list_file(file,loc_result):
n_folder=[];names=[]
for i in range(len(file)):
list_file=file[i].split('/')[2]
list_file=list_file.replace('.csv','')
names.append(list_file)
list_file=('match_'+list_file)
n_folder.append(loc_result+list_file)
return n_folder,names
def select_match(file_exp,file_theo):
list_match=[]
for i in range(len(file_exp)):
name=file_exp[i].split('/')[1]
origen=name.split('_')[1]
origen=origen.replace('.csv','')
for k in range(len(file_theo)):
if origen in file_theo[k]:
list_match.append([file_theo[k],file_exp[i],name.replace('.csv',''),origen])
break
return list_match
def file(file_exp,file_theo):
list_match=[]
for i in range(len(file_exp)):
name=file_exp[i].split('/')[1]
origen=name.split('_')[1]
origen=origen.replace('.csv','')
for k in range(len(file_theo)):
if origen in file_theo[k]:
list_match.append([file_theo[k],file_exp[i],name.replace('.csv',''),origen])
break
    return list_match
| 2.953125 | 3
examples/copernicus_downloader.py | freol35241/voyapt | 3 | 12791367
from pathlib import Path
import cdsapi
YEARS = [2019]
MONTHS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
ROOT = Path("wind_data")
ROOT.mkdir(exist_ok=True)
c = cdsapi.Client(key="YOUR_API_KEY")
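# NOTE: cdsapi can also read credentials from ~/.cdsapirc; the inline key above
# is a placeholder and must be replaced with a real CDS API key.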
for year in YEARS:
for month in MONTHS:
month = str(month).zfill(2)
c.retrieve(
"reanalysis-era5-single-levels",
{
"product_type": "reanalysis",
"format": "netcdf",
"variable": [
"10m_u_component_of_wind",
"10m_v_component_of_wind",
],
"year": str(year),
"month": month,
"day": [
"01",
"02",
"03",
"04",
"05",
"06",
"07",
"08",
"09",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
"24",
"25",
"26",
"27",
"28",
"29",
"30",
"31",
],
"time": [
"00:00",
"06:00",
"12:00",
"18:00",
],
},
str(ROOT / f"CDS_wind_{year}_{month}.nc"),
)
| 2.34375 | 2 |
resources/user.py | jacoboviii/flask-rest-api | 0 | 12791368
from typing import Dict, Tuple
from flask_restful import Resource, reqparse
from models.user import UserModel
class UserRegister(Resource):
parser = reqparse.RequestParser()
parser.add_argument(
"username", type=str, required=True, help="This field cannot be left blank!"
)
parser.add_argument(
"password", type=str, required=True, help="This field cannot be left blank!"
)
def post(self) -> Tuple[Dict[str, str], int]:
data = UserRegister.parser.parse_args()
if UserModel.find_by_username(data["username"]):
return {"message": "A user with that username already exists."}, 400
user = UserModel(**data)
try:
user.save_to_db()
        except Exception:
            return {"message": "An error occurred creating the user."}, 500
return {"message": "User created successfully"}, 201
| 2.96875 | 3 |
Test/FunctionalTests/LegacyTests/FsmEditorLegacyTestScripts/TestGetBounds.py | migueldeicaza/ATF | 1 | 12791369
#Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import System.Xml
import Scea.Dom
import Test
#create new document
print editor
fsmDocument = atfFile.OpenNewDocument(editor)
fsmDocument.CircuitControl.Style.SnapToGrid = False
#create 2 states
namespace = r'http://www.scea.com/FSM/1_0'
stateTypeName= System.Xml.XmlQualifiedName( 'stateType', namespace)
stateType = DomSchemaRegistry.GetComplexType(stateTypeName)
state1 = DomObject(stateType)
state2 = DomObject(stateType)
list = List[DomObject]()
list.AddRange([state1, state2])
editor.Insert(list)
Test.Equal(2, fsmDocument.Circuit.Elements.Count, "verify 2 elements inserted")
#place two elements apart from each other; note Element.Position only accepts integers
fsmDocument.Circuit.Elements[0].Position = Point(96, 128)
fsmDocument.Circuit.Elements[1].Position = Point(192, 228)
bounds = fsmDocument.CircuitControl.GetBoundsF(fsmDocument.Circuit.Elements)
bound1 = fsmDocument.CircuitControl.GetBoundsF(fsmDocument.Circuit.Elements[0])
bound2 = fsmDocument.CircuitControl.GetBoundsF(fsmDocument.Circuit.Elements[1])
# verify bounds
Test.True(bounds.Contains(bound1), "verify bounds enclose individual elements")
Test.True(bounds.Contains(bound2), "verify bounds enclose individual elements")
print Test.SUCCESS
| 2.390625 | 2
gramex/apps/guide/websockethandler/websocketchat.py | joshuamosesb/gramex | 2 | 12791370
import time
from random import choice
from tornado.ioloop import PeriodicCallback
from nltk.chat.util import Chat, reflections
from nltk.chat.eliza import pairs
chat_info = {}
idle_phrases = [
"Are you still there?",
"Would you like to say something?",
"If you're busy, we can talk later.",
"What are you thinking?",
"Got distracted, did you?",
"Let's change the topic. What makes you happy?",
"Let's talk about something else. When did you last travel?",
"Let's meditate for a few minutes.",
"I'll take a short break. Ping me when you're back.",
]
def open(handler):
# Send an introductory message
handler.write_message('Hello. How are you feeling today?')
# Set up chat configuration in the session
chat = chat_info[handler.session['id']] = {
# This is the Eliza bot that will converse with the user
'bot': Chat(pairs, reflections),
# The time at which the user last sent a message. Used for idle messages
'time': time.time(),
# Schedule a periodic check
'callback': PeriodicCallback(idler(handler), callback_time=5000),
# Send the next idle message after this many seconds.
# This is doubled after every idle message, and reset when the user responds
'delay': 10,
}
chat['callback'].start()
def on_message(handler, message):
# When we receive a message, respond with the chatbot response
chat = chat_info[handler.session['id']]
handler.write_message(chat['bot'].respond(message))
# Note the time of the last message. Reset the idle delay time
chat.update(time=time.time(), delay=10)
def on_close(handler):
    # Stop the periodic callback and discard this session's chat state on close
session = handler.session['id']
chat_info[session]['callback'].stop()
chat_info.pop(session)
def idler(handler):
# Return a method that can be called periodically to send idle messages.
# The handler parameter we get here is stored to send future messages.
def method():
'''
If delay seconds have elapsed since last message, send an idle message.
Then double the delay so that we don't keep sending idle messages.
'''
now = time.time()
chat = chat_info[handler.session['id']]
if chat['time'] < now - chat['delay']:
handler.write_message(choice(idle_phrases))
chat['time'] = now
chat['delay'] = chat['delay'] * 2
return method
| 3.0625 | 3 |
mantisshrimp/hub/detr/detr_pretrained_checkpoint_base.py | ramaneswaran/mantisshrimp | 0 | 12791371
__all__ = ["detr_pretrained_checkpoint_base"]
from mantisshrimp.imports import *
def detr_pretrained_checkpoint_base():
# load checkpoint and delete head
url = "https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth"
checkpoint = torch.hub.load_state_dict_from_url(
url, progress=False, map_location="cpu"
)
del checkpoint["model"]["class_embed.weight"]
del checkpoint["model"]["class_embed.bias"]
save_path = os.path.join(torch.hub._get_torch_home(), "detr-r50_no-class-head.pth")
torch.save(checkpoint, save_path)
return save_path
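

# A minimal, hypothetical usage sketch (not part of the original module): load
# the stripped checkpoint into a DETR model built via torch.hub. The hub repo
# name and num_classes value below are assumptions for illustration only.
#
#   model = torch.hub.load("facebookresearch/detr", "detr_resnet50",
#                          pretrained=False, num_classes=4)
#   checkpoint = torch.load(detr_pretrained_checkpoint_base(), map_location="cpu")
#   model.load_state_dict(checkpoint["model"], strict=False)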
| 1.734375 | 2 |
devices/fan.py | tmkasun/jetson-gpio-device-controller | 0 | 12791372
#!/usr/bin/env python3
import logging
import os
import signal
import sys
from .device import Device
class Fan(Device):
@staticmethod
def logTemperature():
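        """Read the Jetson's thermal zones from sysfs and log each in degrees C."""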
process = os.popen(
"cat /sys/devices/virtual/thermal/thermal_zone*/temp")
stdout = process.read()
zones = [
"AO-therm",
"CPU-therm",
"GPU-therm",
"PLL-therm",
"PMIC-Die (Not real)",
"thermal-fan-est"
]
temperatures = stdout.split("\n")
for temperature_index in range(len(temperatures)):
c_temp = temperatures[temperature_index]
            if c_temp != '':
logging.info(
"{} ----> {} C".format(zones[temperature_index], int(c_temp)/1000))
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s: %(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
handlers=[
logging.FileHandler("test.log"),
logging.StreamHandler()
])
PID_FILE = "pro.pid"
def refreshPID(killOnly=False):
    """Kill any previously recorded process and (optionally) record our own PID."""
    current_pid = os.getpid()
    # 'a+' keeps any existing PID readable; 'w+' would truncate the file and the
    # previous PID could never be recovered.
    with open(PID_FILE, 'a+') as pid:
        pid.seek(0)
        previous_pid = pid.readline()
        if len(previous_pid) != 0:
            os.kill(int(previous_pid), signal.SIGTERM)
        if not killOnly:
            logging.info(
                "Starting A/C controller in PID {}".format(current_pid))
            pid.seek(0)
            pid.truncate()
            pid.write(str(current_pid))
def cleanup(device):
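    """Shut down the device, flush logging handlers, and remove the PID file."""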
device.shutdown()
logging.shutdown()
os.remove(PID_FILE)
def main(argv):
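    """Toggle the fan forever; argv may be ["stop"] or [on_seconds, off_seconds]."""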
fan = Fan("Normal Fan", 11)
    if len(argv) == 1 and argv[0] == "stop":
refreshPID(True)
cleanup(fan)
logging.warning(
"Killed existing stale process and stopping the device !!")
return
onTime = 2
offTime = 2
    if len(argv) == 2:
onTime = float(argv[0])
offTime = float(argv[1])
refreshPID()
try:
while True:
Fan.logTemperature()
fan.turnOn(onTime)
Fan.logTemperature()
fan.turnOff(offTime)
    except KeyboardInterrupt:
logging.error("Keyboard interrupt occurred, Gracefully closing . . .")
finally:
cleanup(fan)
if __name__ == "__main__":
main(sys.argv[1:])
| 2.59375 | 3 |
Flash.py | Gu-Youngfeng/Config-Optimization | 2 | 12791373
#!/usr/bin/python
# coding=utf-8
# Author: youngfeng
# Update: 07/16/2018
"""
Flash, proposed by Nair et al. (arXiv '18), which aims to find the (near) optimal configuration in unevaluated set.
STEP 1: select 80%% of original data as dataset
STEP 2: split the dataset into training set (30 configs) and unevaluated set (remaining configs)
STEP 3: predict the optimal configuration in unevaluated set, then remove it from unevaluated set to training set.
STEP 4: repeat the STEP 4 until the budget (50 configs) is loss out.
The details of Progressive are introduced in paper "Finding Faster Configurations using FLASH".
"""
import pandas as pd
import random as rd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
class config_node:
"""
for each configuration, we create a config_node object to save its informations
index : actual rank
features : feature list
perfs : actual performance
"""
def __init__(self, index, features, perfs, predicted):
self.index = index
self.features = features
self.perfs = perfs
self.predicted = predicted
def remove_by_index(config_pool, index):
"""
remove the selected configuration
"""
for config in config_pool:
if config.index == index:
config_pool.remove(config)
break
return config_pool
def find_lowest_rank(train_set, test_set):
"""
return the lowest rank in top 10
"""
sorted_test = sorted(test_set, key=lambda x: x.perfs[-1])
# train data
train_features = [t.features for t in train_set]
train_perfs = [t.perfs[-1] for t in train_set]
# test data
    test_features = [t.features for t in sorted_test]
cart_model = DecisionTreeRegressor()
cart_model.fit(train_features, train_perfs)
    predicted = cart_model.predict(test_features)
predicted_id = [[i, p] for i, p in enumerate(predicted)]
# i-> actual rank, p -> predicted value
predicted_sorted = sorted(predicted_id, key=lambda x: x[-1])
# print(predicted_sorted)
# assigning predicted ranks
predicted_rank_sorted = [[p[0], p[-1], i] for i,p in enumerate(predicted_sorted)]
    # p[0] -> actual rank, p[-1] -> predicted value, i -> predicted rank
select_few = predicted_rank_sorted[:10]
# print the predcited top-10 configuration
# for sf in select_few:
# print("actual rank:", sf[0], " actual value:", sorted_test[sf[0]].perfs[-1], " predicted value:", sf[1], " predicted rank:", sf[2])
# print("-------------")
return np.min([sf[0] for sf in select_few])
def predict_by_cart(train_set, test_set):
"""
return the predicted optimal condiguration
"""
train_features = [config.features for config in train_set]
train_perfs = [config.perfs[-1] for config in train_set]
test_features = [config.features for config in test_set]
cart_model = DecisionTreeRegressor()
cart_model.fit(train_features, train_perfs)
predicted = cart_model.predict(test_features)
predicted_id = [[i,p] for i,p in enumerate(predicted)]
predicted_sorted = sorted(predicted_id, key=lambda x: x[-1]) # sort test_set by predicted performance
return test_set[predicted_sorted[0][0]] # the optimal configuration
def split_data_by_fraction(csv_file, fraction):
"""
    sort and shuffle the data set, then return the given fraction of it
"""
# step1: read from csv file
pdcontent = pd.read_csv(csv_file)
attr_list = pdcontent.columns # all feature list
# step2: split attribute - method 1
features = [i for i in attr_list if "$<" not in i]
perfs = [i for i in attr_list if "$<" in i]
sortedcontent = pdcontent.sort_values(perfs[-1]) # from small to big
# print(len(sortedcontent))
# step3: collect configuration
configs = list()
for c in range(len(pdcontent)):
configs.append(config_node(c, # actual rank
sortedcontent.iloc[c][features].tolist(), # feature list
sortedcontent.iloc[c][perfs].tolist(), # performance list
sortedcontent.iloc[c][perfs].tolist(), # predicted performance list
))
# for config in configs:
# print(config.index, "-", config.perfs, "-", config.predicted, "-", config.rank)
# step4: data split
# fraction = 0.4 # split fraction
# rd.seed(seed) # random seed
rd.shuffle(configs) # shuffle the configs
indexes = range(len(configs))
train_index = indexes[:int(fraction*len(configs))]
dataset = [configs[i] for i in train_index]
# print(len(dataset))
return dataset
def predict_by_flash(dataset, size=30, budget=50):
"""
    iteratively evaluate the predicted-best configuration until the budget is used up,
    then return [train_set, unevaluated_set]
"""
#initilize the train set with 30 configurations
rd.shuffle(dataset)
train_set = dataset[:size]
unevaluated_set = dataset
for config in train_set:
unevaluated_set = remove_by_index(unevaluated_set, config.index) # remove train_set
while budget >= 0: # budget equals to 50
optimal_config = predict_by_cart(train_set, unevaluated_set)
# print("[add]:", optimal_config.index)
unevaluated_set = remove_by_index(unevaluated_set, optimal_config.index)
train_set.append(optimal_config)
budget = budget - 1
return [train_set, unevaluated_set]
if __name__ == "__main__":
#######################################################################################
# select 80% data
dataset = split_data_by_fraction("data/Apache_AllMeasurements.csv", 0.8)
print("### initialzation")
for i in dataset:
print(str(i.index), ",", end="")
print("\n-------------")
data = predict_by_flash(dataset)
print("### finally split")
train_set = data[0]
uneval_set = data[1]
for i in train_set:
print(str(i.index), ",", end="")
print("\n-------------")
for i in uneval_set:
print(str(i.index), ",", end="")
print("\n-------------")
#######################################################################################
lowest_rank = find_lowest_rank(train_set, uneval_set)
print(lowest_rank)
| 3.125 | 3 |
debugging/check_athena.py | 094459/devday-elt-automation | 2 | 12791374
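"""Sanity-check that the Athena database 'scifimovies' and its table 'scifix'
exist in the AwsDataCatalog."""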
import os
import boto3
import sys
ath = boto3.client('athena')
try:
response = ath.get_database(
CatalogName='AwsDataCatalog',
DatabaseName='scifimovies'
)
print("Database found")
except Exception:
print("No Database Found")
try:
response = ath.get_table_metadata(
CatalogName='AwsDataCatalog',
DatabaseName='scifimovies',
TableName='scifix'
)
print("Table Exists")
except Exception:
print("No Table Found")
| 2.671875 | 3 |
external/opengl-registry/extensions/registry.py | FTD2012/CrossWindow | 6 | 12791375
registry = {
'GL_3DFX_multisample' : {
'number' : 207,
'flags' : { 'public' },
'supporters' : { '3DFX' },
'url' : 'extensions/3DFX/3DFX_multisample.txt',
},
'GL_3DFX_tbuffer' : {
'number' : 208,
'flags' : { 'public' },
'supporters' : { '3DFX' },
'url' : 'extensions/3DFX/3DFX_tbuffer.txt',
},
'GL_3DFX_texture_compression_FXT1' : {
'number' : 206,
'flags' : { 'public' },
'supporters' : { '3DFX' },
'url' : 'extensions/3DFX/3DFX_texture_compression_FXT1.txt',
},
'GL_AMD_blend_minmax_factor' : {
'number' : 404,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_blend_minmax_factor.txt',
},
'GL_AMD_compressed_3DC_texture' : {
'esnumber' : 39,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_compressed_3DC_texture.txt',
},
'GL_AMD_compressed_ATC_texture' : {
'esnumber' : 40,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_compressed_ATC_texture.txt',
},
'GL_AMD_conservative_depth' : {
'number' : 385,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_conservative_depth.txt',
},
'GL_AMD_debug_output' : {
'number' : 395,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/AMD/AMD_debug_output.txt',
},
'GL_AMD_depth_clamp_separate' : {
'number' : 401,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/AMD/AMD_depth_clamp_separate.txt',
},
'GL_AMD_draw_buffers_blend' : {
'number' : 366,
'flags' : { 'public' },
'supporters' : { 'NVIDIA', 'TransGaming' },
'url' : 'extensions/AMD/AMD_draw_buffers_blend.txt',
},
'GL_AMD_framebuffer_sample_positions' : {
'number' : 454,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_framebuffer_sample_positions.txt',
},
'GL_AMD_gcn_shader' : {
'number' : 453,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_gcn_shader.txt',
},
'GLX_AMD_gpu_association' : {
'number' : 398,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/GLX_AMD_gpu_association.txt',
},
'GL_AMD_gpu_shader_half_float' : {
'number' : 496,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/AMD/AMD_gpu_shader_half_float.txt',
},
'GL_AMD_gpu_shader_half_float_fetch' : {
'number' : 519,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_gpu_shader_half_float_fetch.txt',
},
'GL_AMD_gpu_shader_int16' : {
'number' : 507,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_gpu_shader_int16.txt',
},
'GL_AMD_gpu_shader_int64' : {
'number' : 451,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_gpu_shader_int64.txt',
},
'GL_AMD_interleaved_elements' : {
'number' : 431,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_interleaved_elements.txt',
},
'GL_AMD_multi_draw_indirect' : {
'number' : 408,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_multi_draw_indirect.txt',
},
'GL_AMD_name_gen_delete' : {
'number' : 394,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/AMD/AMD_name_gen_delete.txt',
},
'GL_AMD_occlusion_query_event' : {
'number' : 442,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_occlusion_query_event.txt',
},
'GL_AMD_performance_monitor' : {
'number' : 360,
'esnumber' : 50,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_performance_monitor.txt',
},
'GL_AMD_pinned_memory' : {
'number' : 411,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_pinned_memory.txt',
},
'GL_AMD_program_binary_Z400' : {
'esnumber' : 48,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_program_binary_Z400.txt',
},
'GL_AMD_query_buffer_object' : {
'number' : 420,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_query_buffer_object.txt',
},
'GL_AMD_sample_positions' : {
'number' : 405,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_sample_positions.txt',
},
'GL_AMD_seamless_cubemap_per_texture' : {
'number' : 383,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_seamless_cubemap_per_texture.txt',
},
'GL_AMD_shader_atomic_counter_ops' : {
'number' : 435,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_shader_atomic_counter_ops.txt',
},
'GL_AMD_shader_ballot' : {
'number' : 497,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/AMD/AMD_shader_ballot.txt',
},
'GL_AMD_shader_explicit_vertex_parameter' : {
'number' : 485,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_shader_explicit_vertex_parameter.txt',
},
'GL_AMD_shader_image_load_store_lod' : {
'number' : 513,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_shader_image_load_store_lod.txt',
},
'GL_AMD_shader_stencil_export' : {
'number' : 382,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_shader_stencil_export.txt',
},
'GL_AMD_shader_stencil_value_export' : {
'number' : 444,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_shader_stencil_value_export.txt',
},
'GL_AMD_shader_trinary_minmax' : {
'number' : 428,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_shader_trinary_minmax.txt',
},
'GL_AMD_sparse_texture' : {
'number' : 426,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_sparse_texture.txt',
},
'GL_AMD_stencil_operation_extended' : {
'number' : 413,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_stencil_operation_extended.txt',
},
'GL_AMD_texture_gather_bias_lod' : {
'number' : 502,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_texture_gather_bias_lod.txt',
},
'GL_AMD_texture_texture4' : {
'number' : 362,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_texture_texture4.txt',
},
'GL_AMD_transform_feedback3_lines_triangles' : {
'number' : 397,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_transform_feedback3_lines_triangles.txt',
},
'GL_AMD_transform_feedback4' : {
'number' : 450,
'flags' : { 'public' },
'url' : 'extensions/AMD/AMD_transform_feedback4.txt',
},
'GL_AMD_vertex_shader_layer' : {
'number' : 417,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_vertex_shader_layer.txt',
},
'GL_AMD_vertex_shader_tessellator' : {
'number' : 363,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_vertex_shader_tessellator.txt',
},
'GL_AMD_vertex_shader_viewport_index' : {
'number' : 416,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/AMD_vertex_shader_viewport_index.txt',
},
'GL_ANDROID_extension_pack_es31a' : {
'esnumber' : 187,
'flags' : { 'public' },
'url' : 'extensions/ANDROID/ANDROID_extension_pack_es31a.txt',
},
'GL_ANGLE_depth_texture' : {
'esnumber' : 138,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_depth_texture.txt',
},
'GL_ANGLE_framebuffer_blit' : {
'esnumber' : 83,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_framebuffer_blit.txt',
},
'GL_ANGLE_framebuffer_multisample' : {
'esnumber' : 84,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_framebuffer_multisample.txt',
},
'GL_ANGLE_instanced_arrays' : {
'esnumber' : 109,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_instanced_arrays.txt',
},
'GL_ANGLE_pack_reverse_row_order' : {
'esnumber' : 110,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_pack_reverse_row_order.txt',
},
'GL_ANGLE_program_binary' : {
'esnumber' : 139,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_program_binary.txt',
},
'GL_ANGLE_texture_compression_dxt3' : {
'esnumber' : 111,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_texture_compression_dxt.txt',
'alias' : { 'GL_ANGLE_texture_compression_dxt1', 'GL_ANGLE_texture_compression_dxt5' },
},
'GL_ANGLE_texture_usage' : {
'esnumber' : 112,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_texture_usage.txt',
},
'GL_ANGLE_translated_shader_source' : {
'esnumber' : 113,
'flags' : { 'public' },
'url' : 'extensions/ANGLE/ANGLE_translated_shader_source.txt',
},
'GL_APPLE_aux_depth_stencil' : {
'number' : 370,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_aux_depth_stencil.txt',
},
'GL_APPLE_client_storage' : {
'number' : 270,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_client_storage.txt',
},
'GL_APPLE_clip_distance' : {
'esnumber' : 193,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_clip_distance.txt',
},
'GL_APPLE_color_buffer_packed_float' : {
'esnumber' : 194,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_color_buffer_packed_float.txt',
},
'GL_APPLE_copy_texture_levels' : {
'esnumber' : 123,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_copy_texture_levels.txt',
},
'GL_APPLE_element_array' : {
'number' : 271,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_element_array.txt',
},
'GL_APPLE_fence' : {
'number' : 272,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_fence.txt',
},
'GL_APPLE_float_pixels' : {
'number' : 368,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_float_pixels.txt',
},
'GL_APPLE_flush_buffer_range' : {
'number' : 321,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_flush_buffer_range.txt',
},
'GL_APPLE_framebuffer_multisample' : {
'esnumber' : 78,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_framebuffer_multisample.txt',
},
'GL_APPLE_object_purgeable' : {
'number' : 371,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_object_purgeable.txt',
},
'GL_APPLE_rgb_422' : {
'number' : 373,
'esnumber' : 76,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_rgb_422.txt',
},
'GL_APPLE_row_bytes' : {
'number' : 372,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_row_bytes.txt',
},
'GL_APPLE_specular_vector' : {
'number' : 159,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_specular_vector.txt',
},
'GL_APPLE_sync' : {
'esnumber' : 124,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_sync.txt',
},
'GL_APPLE_texture_2D_limited_npot' : {
'esnumber' : 59,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_texture_2D_limited_npot.txt',
},
'GL_APPLE_texture_format_BGRA8888' : {
'esnumber' : 79,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_texture_format_BGRA8888.txt',
},
'GL_APPLE_texture_max_level' : {
'esnumber' : 80,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_texture_max_level.txt',
},
'GL_APPLE_texture_packed_float' : {
'esnumber' : 195,
'flags' : { 'public' },
'url' : 'extensions/APPLE/APPLE_texture_packed_float.txt',
},
'GL_APPLE_texture_range' : {
'number' : 367,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_texture_range.txt',
},
'GL_APPLE_transform_hint' : {
'number' : 160,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_transform_hint.txt',
},
'GL_APPLE_vertex_array_object' : {
'number' : 273,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_vertex_array_object.txt',
},
'GL_APPLE_vertex_array_range' : {
'number' : 274,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_vertex_array_range.txt',
},
'GL_APPLE_vertex_program_evaluators' : {
'number' : 369,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_vertex_program_evaluators.txt',
},
'GL_APPLE_ycbcr_422' : {
'number' : 275,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/APPLE/APPLE_ycbcr_422.txt',
},
'GL_ARB_ES2_compatibility' : {
'arbnumber' : 95,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_ES2_compatibility.txt',
},
'GL_ARB_ES3_1_compatibility' : {
'arbnumber' : 159,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_ES3_1_compatibility.txt',
},
'GL_ARB_ES3_2_compatibility' : {
'arbnumber' : 176,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_ES3_2_compatibility.txt',
},
'GL_ARB_ES3_compatibility' : {
'arbnumber' : 127,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_ES3_compatibility.txt',
},
'GL_ARB_arrays_of_arrays' : {
'arbnumber' : 120,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_arrays_of_arrays.txt',
},
'GL_ARB_base_instance' : {
'arbnumber' : 107,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_base_instance.txt',
},
'GL_ARB_bindless_texture' : {
'arbnumber' : 152,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_bindless_texture.txt',
},
'GL_ARB_blend_func_extended' : {
'arbnumber' : 78,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_blend_func_extended.txt',
},
'GL_ARB_buffer_storage' : {
'arbnumber' : 144,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_buffer_storage.txt',
},
'GL_ARB_cl_event' : {
'arbnumber' : 103,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_cl_event.txt',
},
'GL_ARB_clear_buffer_object' : {
'arbnumber' : 121,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_clear_buffer_object.txt',
},
'GL_ARB_clear_texture' : {
'arbnumber' : 145,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_clear_texture.txt',
},
'GL_ARB_clip_control' : {
'arbnumber' : 160,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_clip_control.txt',
},
'GL_ARB_color_buffer_float' : {
'arbnumber' : 39,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_color_buffer_float.txt',
'alias' : { 'GLX_ARB_fbconfig_float', 'WGL_ARB_pixel_format_float' },
},
'GL_ARB_compatibility' : {
'arbnumber' : 58,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_compatibility.txt',
},
'GL_ARB_compressed_texture_pixel_storage' : {
'arbnumber' : 110,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_compressed_texture_pixel_storage.txt',
},
'GL_ARB_compute_shader' : {
'arbnumber' : 122,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_compute_shader.txt',
},
'GL_ARB_compute_variable_group_size' : {
'arbnumber' : 153,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_compute_variable_group_size.txt',
},
'GL_ARB_conditional_render_inverted' : {
'arbnumber' : 161,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_conditional_render_inverted.txt',
},
'GL_ARB_conservative_depth' : {
'arbnumber' : 111,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_conservative_depth.txt',
},
'GL_ARB_copy_buffer' : {
'arbnumber' : 59,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_copy_buffer.txt',
},
'GL_ARB_copy_image' : {
'arbnumber' : 123,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_copy_image.txt',
},
'GLX_ARB_create_context' : {
'arbnumber' : 56,
'flags' : { 'public' },
'url' : 'extensions/ARB/GLX_ARB_create_context.txt',
'comments' : 'Alias to GLX_ARB_create_context_profile not needed - see arbnumber 75.',
},
'GLX_ARB_create_context_no_error' : {
'arbnumber' : 191,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_create_context_no_error.txt',
'comments' : 'Shares extension spec with WGL_ARB_create_context_no_error.',
'alias' : { 'WGL_ARB_create_context_no_error' },
},
'GLX_ARB_create_context_profile' : {
'arbnumber' : 75,
'flags' : { 'public' },
'url' : 'extensions/ARB/GLX_ARB_create_context.txt',
'comments' : 'Included with arbnumber 56, GLX_ARB_create_context.',
},
'GLX_ARB_create_context_robustness' : {
'arbnumber' : 101,
'flags' : { 'public' },
'url' : 'extensions/ARB/GLX_ARB_create_context_robustness.txt',
},
'GL_ARB_cull_distance' : {
'arbnumber' : 162,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_cull_distance.txt',
},
'GL_ARB_debug_output' : {
'arbnumber' : 104,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_debug_output.txt',
},
'GL_ARB_depth_buffer_float' : {
'arbnumber' : 43,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_depth_buffer_float.txt',
},
'GL_ARB_depth_clamp' : {
'arbnumber' : 61,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_depth_clamp.txt',
},
'GL_ARB_depth_texture' : {
'arbnumber' : 22,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_depth_texture.txt',
},
'GL_ARB_derivative_control' : {
'arbnumber' : 163,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_derivative_control.txt',
},
'GL_ARB_direct_state_access' : {
'arbnumber' : 164,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_direct_state_access.txt',
},
'GL_ARB_draw_buffers' : {
'arbnumber' : 37,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_draw_buffers.txt',
},
'GL_ARB_draw_buffers_blend' : {
'arbnumber' : 69,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_draw_buffers_blend.txt',
},
'GL_ARB_draw_elements_base_vertex' : {
'arbnumber' : 62,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_draw_elements_base_vertex.txt',
},
'GL_ARB_draw_indirect' : {
'arbnumber' : 87,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_draw_indirect.txt',
},
'GL_ARB_draw_instanced' : {
'arbnumber' : 44,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_draw_instanced.txt',
},
'GL_ARB_enhanced_layouts' : {
'arbnumber' : 146,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_enhanced_layouts.txt',
},
'GL_ARB_explicit_attrib_location' : {
'arbnumber' : 79,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_explicit_attrib_location.txt',
},
'GL_ARB_explicit_uniform_location' : {
'arbnumber' : 128,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_explicit_uniform_location.txt',
},
'GL_ARB_fragment_coord_conventions' : {
'arbnumber' : 63,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_fragment_coord_conventions.txt',
},
'GL_ARB_fragment_layer_viewport' : {
'arbnumber' : 129,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_fragment_layer_viewport.txt',
},
'GL_ARB_fragment_program' : {
'arbnumber' : 27,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_fragment_program.txt',
},
'GL_ARB_fragment_program_shadow' : {
'arbnumber' : 36,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_fragment_program_shadow.txt',
},
'GL_ARB_fragment_shader' : {
'arbnumber' : 32,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_fragment_shader.txt',
},
'GL_ARB_fragment_shader_interlock' : {
'arbnumber' : 177,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_fragment_shader_interlock.txt',
},
'GL_ARB_framebuffer_no_attachments' : {
'arbnumber' : 130,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_framebuffer_no_attachments.txt',
},
'GL_ARB_framebuffer_object' : {
'arbnumber' : 45,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_framebuffer_object.txt',
},
'GL_ARB_framebuffer_sRGB' : {
'arbnumber' : 46,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_framebuffer_sRGB.txt',
'alias' : { 'GLX_ARB_framebuffer_sRGB', 'WGL_ARB_framebuffer_sRGB' },
},
'GL_ARB_geometry_shader4' : {
'arbnumber' : 47,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_geometry_shader4.txt',
},
'GLX_ARB_get_proc_address' : {
'arbnumber' : 2,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/GLX_ARB_get_proc_address.txt',
},
'GL_ARB_get_program_binary' : {
'arbnumber' : 96,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_get_program_binary.txt',
},
'GL_ARB_get_texture_sub_image' : {
'arbnumber' : 165,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_get_texture_sub_image.txt',
},
'GL_ARB_gl_spirv' : {
'arbnumber' : 190,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_gl_spirv.txt',
},
'GL_ARB_gpu_shader5' : {
'arbnumber' : 88,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_gpu_shader5.txt',
},
'GL_ARB_gpu_shader_fp64' : {
'arbnumber' : 89,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_gpu_shader_fp64.txt',
},
'GL_ARB_gpu_shader_int64' : {
'arbnumber' : 178,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_gpu_shader_int64.txt',
},
'GL_ARB_half_float_pixel' : {
'arbnumber' : 40,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_half_float_pixel.txt',
},
'GL_ARB_half_float_vertex' : {
'arbnumber' : 48,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_half_float_vertex.txt',
},
'GL_ARB_indirect_parameters' : {
'arbnumber' : 154,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_indirect_parameters.txt',
},
'GL_ARB_instanced_arrays' : {
'arbnumber' : 49,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_instanced_arrays.txt',
},
'GL_ARB_internalformat_query' : {
'arbnumber' : 112,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_internalformat_query.txt',
},
'GL_ARB_internalformat_query2' : {
'arbnumber' : 131,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_internalformat_query2.txt',
},
'GL_ARB_invalidate_subdata' : {
'arbnumber' : 132,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_invalidate_subdata.txt',
},
'GL_ARB_map_buffer_alignment' : {
'arbnumber' : 113,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_map_buffer_alignment.txt',
},
'GL_ARB_map_buffer_range' : {
'arbnumber' : 50,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_map_buffer_range.txt',
},
'GL_ARB_matrix_palette' : {
'arbnumber' : 16,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_matrix_palette.txt',
},
'GL_ARB_multi_bind' : {
'arbnumber' : 147,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_multi_bind.txt',
},
'GL_ARB_multi_draw_indirect' : {
'arbnumber' : 133,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_multi_draw_indirect.txt',
},
'GL_ARB_multisample' : {
'arbnumber' : 5,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_multisample.txt',
'alias' : { 'GLX_ARB_multisample', 'WGL_ARB_multisample' },
},
'GL_ARB_multitexture' : {
'arbnumber' : 1,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_multitexture.txt',
},
'GL_ARB_occlusion_query' : {
'arbnumber' : 29,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_occlusion_query.txt',
},
'GL_ARB_occlusion_query2' : {
'arbnumber' : 80,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_occlusion_query2.txt',
},
'GL_ARB_parallel_shader_compile' : {
'arbnumber' : 179,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_parallel_shader_compile.txt',
},
'GL_ARB_pipeline_statistics_query' : {
'arbnumber' : 171,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_pipeline_statistics_query.txt',
},
'GL_ARB_pixel_buffer_object' : {
'arbnumber' : 42,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_pixel_buffer_object.txt',
},
'GL_ARB_point_parameters' : {
'arbnumber' : 14,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_point_parameters.txt',
},
'GL_ARB_point_sprite' : {
'arbnumber' : 35,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_point_sprite.txt',
},
'GL_ARB_polygon_offset_clamp' : {
'arbnumber' : 193,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_polygon_offset_clamp.txt',
},
'GL_ARB_post_depth_coverage' : {
'arbnumber' : 180,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_post_depth_coverage.txt',
},
'GL_ARB_program_interface_query' : {
'arbnumber' : 134,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_program_interface_query.txt',
},
'GL_ARB_provoking_vertex' : {
'arbnumber' : 64,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_provoking_vertex.txt',
},
'GL_ARB_query_buffer_object' : {
'arbnumber' : 148,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_query_buffer_object.txt',
},
'GL_ARB_robust_buffer_access_behavior' : {
'arbnumber' : 135,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_robust_buffer_access_behavior.txt',
},
'GL_ARB_robustness' : {
'arbnumber' : 105,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_robustness.txt',
},
'GLX_ARB_robustness_application_isolation' : {
'arbnumber' : 142,
'flags' : { 'public' },
'url' : 'extensions/ARB/GLX_ARB_robustness_application_isolation.txt',
'alias' : { 'GLX_ARB_robustness_share_group_isolation' },
},
'GL_ARB_robustness_isolation' : {
'arbnumber' : 126,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_robustness_application_isolation.txt',
'alias' : { 'GL_ARB_robustness_share_group_isolation' },
},
'GL_ARB_sample_locations' : {
'arbnumber' : 181,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sample_locations.txt',
},
'GL_ARB_sample_shading' : {
'arbnumber' : 70,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sample_shading.txt',
},
'GL_ARB_sampler_objects' : {
'arbnumber' : 81,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sampler_objects.txt',
},
'GL_ARB_seamless_cube_map' : {
'arbnumber' : 65,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_seamless_cube_map.txt',
},
'GL_ARB_seamless_cubemap_per_texture' : {
'arbnumber' : 155,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_seamless_cubemap_per_texture.txt',
},
'GL_ARB_separate_shader_objects' : {
'arbnumber' : 97,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_separate_shader_objects.txt',
},
'GL_ARB_shader_atomic_counter_ops' : {
'arbnumber' : 182,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_atomic_counter_ops.txt',
},
'GL_ARB_shader_atomic_counters' : {
'arbnumber' : 114,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_atomic_counters.txt',
},
'GL_ARB_shader_ballot' : {
'arbnumber' : 183,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_ballot.txt',
},
'GL_ARB_shader_bit_encoding' : {
'arbnumber' : 82,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_bit_encoding.txt',
},
'GL_ARB_shader_clock' : {
'arbnumber' : 184,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_clock.txt',
},
'GL_ARB_shader_draw_parameters' : {
'arbnumber' : 156,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_draw_parameters.txt',
},
'GL_ARB_shader_group_vote' : {
'arbnumber' : 157,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_group_vote.txt',
},
'GL_ARB_shader_image_load_store' : {
'arbnumber' : 115,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_image_load_store.txt',
},
'GL_ARB_shader_image_size' : {
'arbnumber' : 136,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_image_size.txt',
},
'GL_ARB_shader_objects' : {
'arbnumber' : 30,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_shader_objects.txt',
},
'GL_ARB_shader_precision' : {
'arbnumber' : 98,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_precision.txt',
},
'GL_ARB_shader_stencil_export' : {
'arbnumber' : 106,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_stencil_export.txt',
},
'GL_ARB_shader_storage_buffer_object' : {
'arbnumber' : 137,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_storage_buffer_object.txt',
},
'GL_ARB_shader_subroutine' : {
'arbnumber' : 90,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_subroutine.txt',
},
'GL_ARB_shader_texture_image_samples' : {
'arbnumber' : 166,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_texture_image_samples.txt',
},
'GL_ARB_shader_texture_lod' : {
'arbnumber' : 60,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_texture_lod.txt',
},
'GL_ARB_shader_viewport_layer_array' : {
'arbnumber' : 185,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shader_viewport_layer_array.txt',
},
'GL_ARB_shading_language_100' : {
'arbnumber' : 33,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_shading_language_100.txt',
},
'GL_ARB_shading_language_420pack' : {
'arbnumber' : 108,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shading_language_420pack.txt',
},
'GL_ARB_shading_language_include' : {
'arbnumber' : 76,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shading_language_include.txt',
},
'GL_ARB_shading_language_packing' : {
'arbnumber' : 116,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_shading_language_packing.txt',
},
'GL_ARB_shadow' : {
'arbnumber' : 23,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_shadow.txt',
},
'GL_ARB_shadow_ambient' : {
'arbnumber' : 24,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_shadow_ambient.txt',
},
'GL_ARB_sparse_buffer' : {
'arbnumber' : 172,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sparse_buffer.txt',
},
'GL_ARB_sparse_texture' : {
'arbnumber' : 158,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sparse_texture.txt',
},
'GL_ARB_sparse_texture2' : {
'arbnumber' : 186,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sparse_texture2.txt',
},
'GL_ARB_sparse_texture_clamp' : {
'arbnumber' : 187,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sparse_texture_clamp.txt',
},
'GL_ARB_spirv_extensions' : {
'arbnumber' : 194,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_spirv_extensions.txt',
},
'GL_ARB_stencil_texturing' : {
'arbnumber' : 138,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_stencil_texturing.txt',
},
'GL_ARB_sync' : {
'arbnumber' : 66,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_sync.txt',
},
'GL_ARB_tessellation_shader' : {
'arbnumber' : 91,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_tessellation_shader.txt',
},
'GL_ARB_texture_barrier' : {
'arbnumber' : 167,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_barrier.txt',
},
'GL_ARB_texture_border_clamp' : {
'arbnumber' : 13,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_border_clamp.txt',
},
'GL_ARB_texture_buffer_object' : {
'arbnumber' : 51,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_buffer_object.txt',
},
'GL_ARB_texture_buffer_object_rgb32' : {
'arbnumber' : 92,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_buffer_object_rgb32.txt',
},
'GL_ARB_texture_buffer_range' : {
'arbnumber' : 139,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_buffer_range.txt',
},
'GL_ARB_texture_compression' : {
'arbnumber' : 12,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_compression.txt',
},
'GL_ARB_texture_compression_bptc' : {
'arbnumber' : 77,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_compression_bptc.txt',
},
'GL_ARB_texture_compression_rgtc' : {
'arbnumber' : 52,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_compression_rgtc.txt',
},
'GL_ARB_texture_cube_map' : {
'arbnumber' : 7,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_cube_map.txt',
},
'GL_ARB_texture_cube_map_array' : {
'arbnumber' : 71,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_cube_map_array.txt',
},
'GL_ARB_texture_env_add' : {
'arbnumber' : 6,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_env_add.txt',
},
'GL_ARB_texture_env_combine' : {
'arbnumber' : 17,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_env_combine.txt',
},
'GL_ARB_texture_env_crossbar' : {
'arbnumber' : 18,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_env_crossbar.txt',
},
'GL_ARB_texture_env_dot3' : {
'arbnumber' : 19,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_env_dot3.txt',
},
'GL_ARB_texture_filter_anisotropic' : {
'arbnumber' : 195,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_filter_anisotropic.txt',
},
'GL_ARB_texture_filter_minmax' : {
'arbnumber' : 188,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_filter_minmax.txt',
},
'GL_ARB_texture_float' : {
'arbnumber' : 41,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_float.txt',
},
'GL_ARB_texture_gather' : {
'arbnumber' : 72,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_gather.txt',
},
'GL_ARB_texture_mirror_clamp_to_edge' : {
'arbnumber' : 149,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_mirror_clamp_to_edge.txt',
},
'GL_ARB_texture_mirrored_repeat' : {
'arbnumber' : 21,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_mirrored_repeat.txt',
},
'GL_ARB_texture_multisample' : {
'arbnumber' : 67,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_multisample.txt',
},
'GL_ARB_texture_non_power_of_two' : {
'arbnumber' : 34,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_non_power_of_two.txt',
},
'GL_ARB_texture_query_levels' : {
'arbnumber' : 140,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_query_levels.txt',
},
'GL_ARB_texture_query_lod' : {
'arbnumber' : 73,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_query_lod.txt',
},
'GL_ARB_texture_rectangle' : {
'arbnumber' : 38,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_rectangle.txt',
},
'GL_ARB_texture_rg' : {
'arbnumber' : 53,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_texture_rg.txt',
},
'GL_ARB_texture_rgb10_a2ui' : {
'arbnumber' : 83,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_rgb10_a2ui.txt',
},
'GL_ARB_texture_stencil8' : {
'arbnumber' : 150,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_stencil8.txt',
},
'GL_ARB_texture_storage' : {
'arbnumber' : 117,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_storage.txt',
},
'GL_ARB_texture_storage_multisample' : {
'arbnumber' : 141,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_storage_multisample.txt',
},
'GL_ARB_texture_swizzle' : {
'arbnumber' : 84,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_swizzle.txt',
},
'GL_ARB_texture_view' : {
'arbnumber' : 124,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_texture_view.txt',
},
'GL_ARB_timer_query' : {
'arbnumber' : 85,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_timer_query.txt',
},
'GL_ARB_transform_feedback2' : {
'arbnumber' : 93,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_transform_feedback2.txt',
},
'GL_ARB_transform_feedback3' : {
'arbnumber' : 94,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_transform_feedback3.txt',
},
'GL_ARB_transform_feedback_instanced' : {
'arbnumber' : 109,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_transform_feedback_instanced.txt',
},
'GL_ARB_transform_feedback_overflow_query' : {
'arbnumber' : 173,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_transform_feedback_overflow_query.txt',
},
'GL_ARB_transpose_matrix' : {
'arbnumber' : 3,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_transpose_matrix.txt',
},
'GL_ARB_uniform_buffer_object' : {
'arbnumber' : 57,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_uniform_buffer_object.txt',
},
'GL_ARB_vertex_array_bgra' : {
'arbnumber' : 68,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_vertex_array_bgra.txt',
},
'GL_ARB_vertex_array_object' : {
'arbnumber' : 54,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_vertex_array_object.txt',
},
'GL_ARB_vertex_attrib_64bit' : {
'arbnumber' : 99,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_vertex_attrib_64bit.txt',
},
'GL_ARB_vertex_attrib_binding' : {
'arbnumber' : 125,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_vertex_attrib_binding.txt',
},
'GL_ARB_vertex_blend' : {
'arbnumber' : 15,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_vertex_blend.txt',
},
'GL_ARB_vertex_buffer_object' : {
'arbnumber' : 28,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_vertex_buffer_object.txt',
'alias' : { 'GLX_ARB_vertex_buffer_object' },
},
'GL_ARB_vertex_program' : {
'arbnumber' : 26,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_vertex_program.txt',
},
'GL_ARB_vertex_shader' : {
'arbnumber' : 31,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_vertex_shader.txt',
},
'GL_ARB_vertex_type_10f_11f_11f_rev' : {
'arbnumber' : 151,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_vertex_type_10f_11f_11f_rev.txt',
},
'GL_ARB_vertex_type_2_10_10_10_rev' : {
'arbnumber' : 86,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_vertex_type_2_10_10_10_rev.txt',
},
'GL_ARB_viewport_array' : {
'arbnumber' : 100,
'flags' : { 'public' },
'url' : 'extensions/ARB/ARB_viewport_array.txt',
},
'GL_ARB_window_pos' : {
'arbnumber' : 25,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/ARB_window_pos.txt',
},
'GL_ARM_mali_program_binary' : {
'esnumber' : 120,
'flags' : { 'public' },
'url' : 'extensions/ARM/ARM_mali_program_binary.txt',
},
'GL_ARM_mali_shader_binary' : {
'esnumber' : 81,
'flags' : { 'public' },
'url' : 'extensions/ARM/ARM_mali_shader_binary.txt',
},
'GL_ARM_rgba8' : {
'esnumber' : 82,
'flags' : { 'public' },
'url' : 'extensions/ARM/ARM_rgba8.txt',
},
'GL_ARM_shader_framebuffer_fetch' : {
'esnumber' : 165,
'flags' : { 'public' },
'url' : 'extensions/ARM/ARM_shader_framebuffer_fetch.txt',
},
'GL_ARM_shader_framebuffer_fetch_depth_stencil' : {
'esnumber' : 166,
'flags' : { 'public' },
'url' : 'extensions/ARM/ARM_shader_framebuffer_fetch_depth_stencil.txt',
},
'GL_ATI_draw_buffers' : {
'number' : 277,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_draw_buffers.txt',
},
'GL_ATI_element_array' : {
'number' : 256,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_element_array.txt',
},
'GL_ATI_envmap_bumpmap' : {
'number' : 244,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_envmap_bumpmap.txt',
},
'GL_ATI_fragment_shader' : {
'number' : 245,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_fragment_shader.txt',
},
'GL_ATI_map_object_buffer' : {
'number' : 288,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_map_object_buffer.txt',
},
'GL_ATI_meminfo' : {
'number' : 359,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/ATI/ATI_meminfo.txt',
},
'GL_ATI_pn_triangles' : {
'number' : 246,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_pn_triangles.txt',
},
'GL_ATI_separate_stencil' : {
'number' : 289,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_separate_stencil.txt',
},
'GL_ATI_text_fragment_shader' : {
'number' : 269,
'flags' : { 'public' },
'supporters' : { 'APPLE', 'NVIDIA' },
'url' : 'extensions/ATI/ATI_text_fragment_shader.txt',
},
'GL_ATI_texture_env_combine3' : {
'number' : 279,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_texture_env_combine3.txt',
},
'GL_ATI_texture_float' : {
'number' : 280,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_texture_float.txt',
},
'GL_ATI_texture_mirror_once' : {
'number' : 221,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_texture_mirror_once.txt',
},
'GL_ATI_vertex_array_object' : {
'number' : 247,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_vertex_array_object.txt',
},
'GL_ATI_vertex_attrib_array_object' : {
'number' : 290,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_vertex_attrib_array_object.txt',
},
'GL_ATI_vertex_streams' : {
'number' : 249,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/ATI_vertex_streams.txt',
},
'GL_DMP_program_binary' : {
'esnumber' : 192,
'flags' : { 'public' },
'url' : 'extensions/DMP/DMP_program_binary.txt',
},
'GL_DMP_shader_binary' : {
'esnumber' : 88,
'flags' : { 'public' },
'url' : 'extensions/DMP/DMP_shader_binary.txt',
},
'GL_EXT_422_pixels' : {
'number' : 178,
'flags' : { 'public' },
'supporters' : { 'INGR' },
'url' : 'extensions/EXT/EXT_422_pixels.txt',
},
'GL_EXT_YUV_target' : {
'esnumber' : 222,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_YUV_target.txt',
},
'GL_EXT_abgr' : {
'number' : 1,
'flags' : { 'public' },
'supporters' : { 'IBM', 'KGC', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_abgr.txt',
},
'GL_EXT_base_instance' : {
'esnumber' : 203,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_base_instance.txt',
},
'GL_EXT_bgra' : {
'number' : 129,
'flags' : { 'public' },
'supporters' : { 'MS' },
'url' : 'extensions/EXT/EXT_bgra.txt',
},
'GL_EXT_bindable_uniform' : {
'number' : 342,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_bindable_uniform.txt',
},
'GL_EXT_blend_color' : {
'number' : 2,
'flags' : { 'public' },
'supporters' : { 'HP', 'INGR', 'KGC', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_blend_color.txt',
},
'GL_EXT_blend_equation_separate' : {
'number' : 299,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_blend_equation_separate.txt',
},
'GL_EXT_blend_func_extended' : {
'esnumber' : 247,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_blend_func_extended.txt',
},
'GL_EXT_blend_func_separate' : {
'number' : 173,
'flags' : { 'public' },
'supporters' : { 'IBM', 'INGR' },
'url' : 'extensions/EXT/EXT_blend_func_separate.txt',
},
'GL_EXT_blend_logic_op' : {
'number' : 39,
'flags' : { 'public' },
'supporters' : { 'HP', 'IBM', 'INGR', 'KGC', 'SGI' },
'url' : 'extensions/EXT/EXT_blend_logic_op.txt',
},
'GL_EXT_blend_minmax' : {
'number' : 37,
'esnumber' : 65,
'flags' : { 'public' },
'supporters' : { 'HP', 'IBM', 'INGR', 'KGC', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_blend_minmax.txt',
},
'GL_EXT_blend_subtract' : {
'number' : 38,
'flags' : { 'public' },
'supporters' : { 'HP', 'IBM', 'INGR', 'KGC', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_blend_subtract.txt',
},
'GLX_EXT_buffer_age' : {
'number' : 427,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'NVIDIA' },
'url' : 'extensions/EXT/GLX_EXT_buffer_age.txt',
},
'GL_EXT_buffer_storage' : {
'esnumber' : 239,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_buffer_storage.txt',
},
'GL_EXT_clear_texture' : {
'esnumber' : 269,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_clear_texture.txt',
},
'GL_EXT_clip_cull_distance' : {
'esnumber' : 257,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_clip_cull_distance.txt',
},
'GL_EXT_clip_volume_hint' : {
'number' : 79,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_clip_volume_hint.txt',
},
'GL_EXT_cmyka' : {
'number' : 18,
'flags' : { 'public' },
'supporters' : { 'ES', 'SGI' },
'url' : 'extensions/EXT/EXT_cmyka.txt',
},
'GL_EXT_color_buffer_float' : {
'esnumber' : 137,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_color_buffer_float.txt',
},
'GL_EXT_color_buffer_half_float' : {
'esnumber' : 97,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_color_buffer_half_float.txt',
},
'GL_EXT_color_subtable' : {
'number' : 74,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_color_subtable.txt',
},
'GL_EXT_compiled_vertex_array' : {
'number' : 97,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'SGI' },
'url' : 'extensions/EXT/EXT_compiled_vertex_array.txt',
},
'GL_EXT_compressed_ETC1_RGB8_sub_texture' : {
'esnumber' : 188,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_compressed_ETC1_RGB8_sub_texture.txt',
},
'GL_EXT_conservative_depth' : {
'esnumber' : 268,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_conservative_depth.txt',
},
'GL_EXT_convolution' : {
'number' : 12,
'flags' : { 'public' },
'supporters' : { 'HP', 'KGC', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_convolution.txt',
},
'GL_EXT_coordinate_frame' : {
'number' : 156,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_coordinate_frame.txt',
},
'GL_EXT_copy_image' : {
'esnumber' : 175,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_copy_image.txt',
},
'GL_EXT_copy_texture' : {
'number' : 10,
'flags' : { 'public' },
'supporters' : { 'ES', 'HP', 'SGI' },
'url' : 'extensions/EXT/EXT_copy_texture.txt',
},
'GLX_EXT_create_context_es2_profile' : {
'number' : 399,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/GLX_EXT_create_context_es2_profile.txt',
'alias' : { 'GLX_EXT_create_context_es_profile' },
},
'GL_EXT_cull_vertex' : {
'number' : 98,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'SGI' },
'url' : 'extensions/EXT/EXT_cull_vertex.txt',
},
'GL_EXT_debug_label' : {
'number' : 439,
'esnumber' : 98,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/EXT/EXT_debug_label.txt',
},
'GL_EXT_debug_marker' : {
'number' : 440,
'esnumber' : 99,
'flags' : { 'public' },
'supporters' : { 'APPLE' },
'url' : 'extensions/EXT/EXT_debug_marker.txt',
},
'GL_EXT_depth_bounds_test' : {
'number' : 297,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_depth_bounds_test.txt',
},
'GL_EXT_direct_state_access' : {
'number' : 353,
'flags' : { 'public' },
'supporters' : { 'Blizzard', 'NVIDIA', 'S3', 'TransGaming' },
'url' : 'extensions/EXT/EXT_direct_state_access.txt',
},
'GL_EXT_discard_framebuffer' : {
'esnumber' : 64,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_discard_framebuffer.txt',
},
'GL_EXT_disjoint_timer_query' : {
'esnumber' : 150,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_disjoint_timer_query.txt',
},
'GL_EXT_draw_buffers' : {
'esnumber' : 151,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_draw_buffers.txt',
},
'GL_EXT_draw_buffers2' : {
'number' : 340,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_draw_buffers2.txt',
},
'GL_EXT_draw_buffers_indexed' : {
'esnumber' : 176,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_draw_buffers_indexed.txt',
},
'GL_EXT_draw_elements_base_vertex' : {
'esnumber' : 204,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_draw_elements_base_vertex.txt',
},
'GL_EXT_draw_instanced' : {
'number' : 327,
'esnumber' : 157,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_draw_instanced.txt',
},
'GL_EXT_draw_range_elements' : {
'number' : 112,
'flags' : { 'public' },
'supporters' : { 'MS' },
'url' : 'extensions/EXT/EXT_draw_range_elements.txt',
},
'GL_EXT_draw_transform_feedback' : {
'esnumber' : 272,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_draw_transform_feedback.txt',
},
'GL_EXT_external_buffer' : {
'number' : 508,
'esnumber' : 284,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_external_buffer.txt',
},
'GL_EXT_EGL_image_array' : {
'esnumber' : 278,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_EGL_image_array.txt',
},
'GL_EXT_EGL_image_external_wrap_modes' : {
'esnumber' : 298,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_EGL_image_external_wrap_modes.txt',
},
'GL_EXT_EGL_image_storage' : {
'number' : 522,
'esnumber' : 301,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_EGL_image_storage.txt',
},
'GL_EXT_memory_object' : {
'number' : 503,
'esnumber' : 280,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_external_objects.txt',
'alias' : { 'GL_EXT_semaphore' },
},
'GL_EXT_memory_object_fd' : {
'number' : 504,
'esnumber' : 281,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_external_objects_fd.txt',
'alias' : { 'GL_EXT_semaphore_fd' },
},
'GL_EXT_memory_object_win32' : {
'number' : 505,
'esnumber' : 282,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_external_objects_win32.txt',
'alias' : { 'GL_EXT_semaphore_win32' },
},
'GL_EXT_float_blend' : {
'esnumber' : 224,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_float_blend.txt',
},
'GL_EXT_fog_coord' : {
'number' : 149,
'flags' : { 'public' },
'supporters' : { '3DFX', 'NVIDIA', 'REND' },
'url' : 'extensions/EXT/EXT_fog_coord.txt',
},
'GL_EXT_frag_depth' : {
'esnumber' : 86,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_frag_depth.txt',
},
'GL_EXT_fragment_lighting' : {
'number' : 102,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/EXT/EXT_fragment_lighting.txt',
},
'GL_EXT_framebuffer_blit' : {
'number' : 316,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_framebuffer_blit.txt',
},
'GL_EXT_framebuffer_multisample' : {
'number' : 317,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_framebuffer_multisample.txt',
},
'GL_EXT_framebuffer_multisample_blit_scaled' : {
'number' : 409,
'flags' : { 'public' },
'supporters' : { 'APPLE', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_framebuffer_multisample_blit_scaled.txt',
},
'GL_EXT_framebuffer_object' : {
'number' : 310,
'flags' : { 'public' },
'supporters' : { '3DL', 'ATI', 'INTEL', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_framebuffer_object.txt',
},
'GL_EXT_framebuffer_sRGB' : {
'number' : 337,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_framebuffer_sRGB.txt',
'alias' : { 'GLX_EXT_framebuffer_sRGB', 'WGL_EXT_framebuffer_sRGB' },
},
'GL_EXT_geometry_shader' : {
'esnumber' : 177,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_geometry_shader.txt',
'alias' : { 'GL_EXT_geometry_point_size' },
},
'GL_EXT_geometry_shader4' : {
'number' : 324,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_geometry_shader4.txt',
},
'GLX_EXT_stereo_tree' : {
'number' : 452,
'flags' : { 'public' },
'url' : 'extensions/EXT/GLX_EXT_stereo_tree.txt',
},
'GL_EXT_gpu_program_parameters' : {
'number' : 320,
'flags' : { 'public' },
'supporters' : { 'APPLE', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_gpu_program_parameters.txt',
},
'GL_EXT_gpu_shader4' : {
'number' : 326,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_gpu_shader4.txt',
},
'GL_EXT_gpu_shader5' : {
'esnumber' : 178,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_gpu_shader5.txt',
},
'GL_EXT_histogram' : {
'number' : 11,
'flags' : { 'public' },
'supporters' : { 'INGR', 'KGC', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_histogram.txt',
},
'GLX_EXT_import_context' : {
'number' : 47,
'flags' : { 'public' },
'supporters' : { 'IBM', 'SGI' },
'url' : 'extensions/EXT/GLX_EXT_import_context.txt',
},
'GL_EXT_index_array_formats' : {
'number' : 96,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'SGI' },
'url' : 'extensions/EXT/EXT_index_array_formats.txt',
},
'GL_EXT_index_func' : {
'number' : 95,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'SGI' },
'url' : 'extensions/EXT/EXT_index_func.txt',
},
'GL_EXT_index_material' : {
'number' : 94,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'SGI' },
'url' : 'extensions/EXT/EXT_index_material.txt',
},
'GL_EXT_index_texture' : {
'number' : 93,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'SGI' },
'url' : 'extensions/EXT/EXT_index_texture.txt',
},
'GL_EXT_instanced_arrays' : {
'esnumber' : 156,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_instanced_arrays.txt',
},
'GLX_EXT_libglvnd' : {
'number' : 482,
'flags' : { 'public' },
'url' : 'extensions/EXT/GLX_EXT_libglvnd.txt',
},
'GL_EXT_light_texture' : {
'number' : 117,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/EXT/EXT_light_texture.txt',
},
'GL_EXT_map_buffer_range' : {
'esnumber' : 121,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_map_buffer_range.txt',
},
'GL_EXT_misc_attribute' : {
'number' : 31,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_misc_attribute.txt',
},
'GL_EXT_multi_draw_arrays' : {
'number' : 148,
'esnumber' : 69,
'flags' : { 'public' },
'supporters' : { 'IBM', 'IMG', 'SUN' },
'url' : 'extensions/EXT/EXT_multi_draw_arrays.txt',
'alias' : { 'GL_SUN_multi_draw_arrays' },
},
'GL_EXT_multi_draw_indirect' : {
'esnumber' : 205,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_multi_draw_indirect.txt',
},
'GL_EXT_multiple_textures' : {
'flags' : { 'obsolete' },
'url' : 'extensions/EXT/EXT_multiple_textures.txt',
},
'GL_EXT_multisample_compatibility' : {
'esnumber' : 248,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_multisample_compatibility.txt',
},
'GL_EXT_multisampled_render_to_texture' : {
'esnumber' : 106,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_multisampled_render_to_texture.txt',
},
'GL_EXT_multisampled_render_to_texture2' : {
'esnumber' : 275,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_multisampled_render_to_texture2.txt',
},
'GL_EXT_multiview_draw_buffers' : {
'esnumber' : 125,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_multiview_draw_buffers.txt',
},
'GLU_EXT_nurbs_tessellator' : {
'number' : 100,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/EXT/GLU_EXT_nurbs_tessellator.txt',
},
'GLU_EXT_object_space_tess' : {
'number' : 75,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/EXT/GLU_EXT_object_space_tess.txt',
},
'GL_EXT_occlusion_query_boolean' : {
'esnumber' : 100,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_occlusion_query_boolean.txt',
},
'GL_EXT_packed_depth_stencil' : {
'number' : 312,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_packed_depth_stencil.txt',
},
'GL_EXT_packed_float' : {
'number' : 328,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_packed_float.txt',
'alias' : { 'GLX_EXT_fbconfig_packed_float', 'WGL_EXT_pixel_format_packed_float' },
},
'GL_EXT_packed_pixels' : {
'number' : 23,
'flags' : { 'public' },
'supporters' : { 'ES', 'INGR', 'SGI' },
'url' : 'extensions/EXT/EXT_packed_pixels.txt',
},
'GL_EXT_paletted_texture' : {
'number' : 78,
'flags' : { 'public' },
'supporters' : { 'MS', 'SGI' },
'url' : 'extensions/EXT/EXT_paletted_texture.txt',
},
'GL_EXT_pixel_buffer_object' : {
'number' : 302,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_pixel_buffer_object.txt',
},
'GL_EXT_pixel_transform' : {
'number' : 138,
'flags' : { 'public' },
'supporters' : { 'HP', 'SUN' },
'url' : 'extensions/EXT/EXT_pixel_transform.txt',
},
'GL_EXT_pixel_transform_color_table' : {
'number' : 139,
'flags' : { 'public' },
'supporters' : { 'HP', 'SUN' },
'url' : 'extensions/EXT/EXT_pixel_transform_color_table.txt',
},
'GL_EXT_point_parameters' : {
'number' : 54,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/EXT/EXT_point_parameters.txt',
},
'GL_EXT_polygon_offset' : {
'number' : 3,
'flags' : { 'public' },
'supporters' : { 'HP', 'IBM', 'INGR', 'KGC', 'SGI' },
'url' : 'extensions/EXT/EXT_polygon_offset.txt',
},
'GL_EXT_polygon_offset_clamp' : {
'number' : 460,
'esnumber' : 252,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_polygon_offset_clamp.txt',
},
'GL_EXT_post_depth_coverage' : {
'number' : 461,
'esnumber' : 225,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_post_depth_coverage.txt',
},
'GL_EXT_primitive_bounding_box' : {
'esnumber' : 186,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_primitive_bounding_box.txt',
},
'GL_EXT_protected_textures' : {
'esnumber' : 256,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_protected_textures.txt',
},
'GL_EXT_provoking_vertex' : {
'number' : 364,
'flags' : { 'public' },
'supporters' : { 'NVIDIA', 'TransGaming' },
'url' : 'extensions/EXT/EXT_provoking_vertex.txt',
},
'GL_EXT_pvrtc_sRGB' : {
'esnumber' : 155,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_pvrtc_sRGB.txt',
},
'GL_EXT_raster_multisample' : {
'number' : 462,
'esnumber' : 226,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_raster_multisample.txt',
},
'GL_EXT_read_format_bgra' : {
'esnumber' : 66,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_read_format_bgra.txt',
},
'GL_EXT_render_snorm' : {
'esnumber' : 206,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_render_snorm.txt',
},
'GL_EXT_rescale_normal' : {
'number' : 27,
'flags' : { 'public' },
'supporters' : { 'IBM', 'SUN' },
'url' : 'extensions/EXT/EXT_rescale_normal.txt',
},
'GL_EXT_robustness' : {
'esnumber' : 107,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_robustness.txt',
},
'GL_EXT_sRGB' : {
'esnumber' : 105,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_sRGB.txt',
},
'GL_EXT_sRGB_write_control' : {
'esnumber' : 153,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_sRGB_write_control.txt',
},
'GL_EXT_scene_marker' : {
'number' : 120,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_scene_marker.txt',
'alias' : { 'GLX_EXT_scene_marker' },
},
'GL_EXT_secondary_color' : {
'number' : 145,
'flags' : { 'public' },
'supporters' : { '3DFX', 'NVIDIA', 'REND' },
'url' : 'extensions/EXT/EXT_secondary_color.txt',
},
'GL_EXT_separate_shader_objects' : {
'number' : 377,
'esnumber' : 101,
'flags' : { 'public' },
'supporters' : { 'NVIDIA', 'TransGaming' },
'url' : 'extensions/EXT/EXT_separate_shader_objects.gl.txt',
'esurl' : 'extensions/EXT/EXT_separate_shader_objects.gles.txt',
'comments' : 'Different than the OpenGL extension with the same name string.',
},
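# Illustrative note (not part of the registry data): when the desktop GL and
# ES specifications diverge, an entry carries both 'url' and 'esurl', as in
# GL_EXT_separate_shader_objects above. A hypothetical consumer might resolve
# the right document like so (the helper name and 'es' parameter are
# assumptions for illustration, not registry API):
#
#     def spec_url(ext, es=False):
#         # Prefer the ES-specific spec text when one exists.
#         return ext.get('esurl', ext['url']) if es else ext['url']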
'GL_EXT_separate_specular_color' : {
'number' : 144,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_separate_specular_color.txt',
},
'GL_EXT_shader_framebuffer_fetch' : {
'number' : 520,
'esnumber' : 122,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_framebuffer_fetch.txt',
'alias' : { 'GL_EXT_shader_framebuffer_fetch_non_coherent' },
},
'GL_EXT_shader_group_vote' : {
'esnumber' : 254,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_group_vote.txt',
},
'GL_EXT_shader_image_load_formatted' : {
'number' : 449,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_image_load_formatted.txt',
},
'GL_EXT_shader_image_load_store' : {
'number' : 386,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_shader_image_load_store.txt',
},
'GL_EXT_shader_implicit_conversions' : {
'esnumber' : 179,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_implicit_conversions.txt',
},
'GL_EXT_shader_integer_mix' : {
'number' : 437,
'esnumber' : 161,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/EXT/EXT_shader_integer_mix.txt',
},
'GL_EXT_shader_io_blocks' : {
'esnumber' : 180,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_io_blocks.txt',
},
'GL_EXT_shader_non_constant_global_initializers' : {
'esnumber' : 264,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_non_constant_global_initializers.txt',
},
'GL_EXT_shader_pixel_local_storage' : {
'esnumber' : 167,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_pixel_local_storage.txt',
},
'GL_EXT_shader_pixel_local_storage2' : {
'esnumber' : 253,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_pixel_local_storage2.txt',
},
'GL_EXT_shader_texture_lod' : {
'esnumber' : 77,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shader_texture_lod.txt',
},
'GL_EXT_shadow_funcs' : {
'number' : 267,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_shadow_funcs.txt',
},
'GL_EXT_shadow_samplers' : {
'esnumber' : 102,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_shadow_samplers.txt',
},
'GL_EXT_shared_texture_palette' : {
'number' : 141,
'flags' : { 'public' },
'supporters' : { '3DFX', '3DL', 'SGI' },
'url' : 'extensions/EXT/EXT_shared_texture_palette.txt',
},
'GL_EXT_sparse_texture' : {
'esnumber' : 240,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_sparse_texture.txt',
},
'GL_EXT_sparse_texture2' : {
'number' : 463,
'esnumber' : 259,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_sparse_texture2.txt',
},
'GL_EXT_static_vertex_array' : {
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/EXT/EXT_static_vertex_array.txt',
},
'GL_EXT_stencil_clear_tag' : {
'number' : 314,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_stencil_clear_tag.txt',
},
'GL_EXT_stencil_two_side' : {
'number' : 268,
'flags' : { 'public' },
'supporters' : { 'APPLE', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_stencil_two_side.txt',
},
'GL_EXT_stencil_wrap' : {
'number' : 176,
'flags' : { 'public' },
'supporters' : { 'INGR', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_stencil_wrap.txt',
},
'GL_EXT_subtexture' : {
'number' : 9,
'flags' : { 'public' },
'supporters' : { 'HP', 'IBM', 'INGR', 'KGC', 'SGI' },
'url' : 'extensions/EXT/EXT_subtexture.txt',
},
'GL_EXT_swap_control' : {
'number' : 375,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_swap_control.txt',
},
'GLX_EXT_swap_control_tear' : {
'number' : 414,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/GLX_EXT_swap_control_tear.txt',
},
'GL_EXT_tessellation_shader' : {
'esnumber' : 181,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_tessellation_shader.txt',
'alias' : { 'GL_EXT_tessellation_point_size' },
},
'GL_EXT_texenv_op' : {
'flags' : { 'obsolete' },
'url' : 'extensions/EXT/EXT_texenv_op.txt',
'comments' : 'Evolved into EXT_texture_env_combine.',
},
'GL_EXT_texture' : {
'number' : 4,
'flags' : { 'public' },
'supporters' : { 'HP', 'INGR', 'KGC', 'SGI' },
'url' : 'extensions/EXT/EXT_texture.txt',
},
'GL_EXT_texture3D' : {
'number' : 6,
'flags' : { 'public' },
'supporters' : { 'ES', 'HP', 'IBM', 'SGI', 'SUN' },
'url' : 'extensions/EXT/EXT_texture3D.txt',
},
'GL_EXT_texture_array' : {
'number' : 329,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_array.txt',
},
'GL_EXT_texture_border_clamp' : {
'esnumber' : 182,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_border_clamp.txt',
},
'GL_EXT_texture_buffer' : {
'esnumber' : 183,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_buffer.txt',
},
'GL_EXT_texture_buffer_object' : {
'number' : 330,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_buffer_object.txt',
},
'GL_EXT_texture_compression_astc_decode_mode' : {
'esnumber' : 276,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_compression_astc_decode_mode.txt',
'alias' : { 'GL_EXT_texture_compression_astc_decode_mode_rgb9e5' },
},
'GL_EXT_texture_compression_bptc' : {
'esnumber' : 287,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_compression_bptc.txt',
},
'GL_EXT_texture_compression_dxt1' : {
'number' : 309,
'esnumber' : 49,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_compression_dxt1.txt',
},
'GL_EXT_texture_compression_latc' : {
'number' : 331,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_compression_latc.txt',
},
'GL_EXT_texture_compression_rgtc' : {
'number' : 332,
'esnumber' : 286,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_compression_rgtc.txt',
},
'GL_EXT_texture_compression_s3tc' : {
'number' : 198,
'esnumber' : 154,
'flags' : { 'public' },
'supporters' : { 'INTEL', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_compression_s3tc.txt',
},
'GL_EXT_texture_compression_s3tc_srgb' : {
'esnumber' : 289,
'flags' : { 'public' },
'supporters' : { 'ANGLE' },
'url' : 'extensions/EXT/EXT_texture_compression_s3tc_srgb.txt',
},
'GL_EXT_texture_cube_map' : {
'flags' : { 'incomplete' },
'url' : 'extensions/EXT/EXT_texture_cube_map.txt',
'comments' : 'Extension shipped but was not fully specified. Similar to ARB_texture_cube_map.',
},
'GL_EXT_texture_cube_map_array' : {
'esnumber' : 184,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_cube_map_array.txt',
},
'GL_EXT_texture_env' : {
'number' : 146,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_env.txt',
},
'GL_EXT_texture_env_add' : {
'number' : 185,
'flags' : { 'public' },
'supporters' : { 'ATI', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_env_add.txt',
},
'GL_EXT_texture_env_combine' : {
'number' : 158,
'flags' : { 'public' },
'supporters' : { 'ATI', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_env_combine.txt',
},
'GL_EXT_texture_env_dot3' : {
'number' : 220,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/EXT/EXT_texture_env_dot3.txt',
},
'GL_EXT_texture_filter_anisotropic' : {
'number' : 187,
'esnumber' : 41,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_filter_anisotropic.txt',
},
'GL_EXT_texture_filter_minmax' : {
'number' : 464,
'esnumber' : 227,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_filter_minmax.txt',
},
'GL_EXT_texture_format_BGRA8888' : {
'esnumber' : 51,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_format_BGRA8888.txt',
},
'GL_EXT_texture_format_sRGB_override' : {
'esnumber' : 299,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_format_sRGB_override.txt',
},
'GLX_EXT_texture_from_pixmap' : {
'number' : 344,
'flags' : { 'public' },
'supporters' : { 'MESA', 'NVIDIA' },
'url' : 'extensions/EXT/GLX_EXT_texture_from_pixmap.txt',
},
'GL_EXT_texture_integer' : {
'number' : 343,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_integer.txt',
},
'GL_EXT_texture_lod_bias' : {
'number' : 186,
'esnumber' : 60,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_lod_bias.txt',
},
'GL_EXT_texture_mirror_clamp' : {
'number' : 298,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_mirror_clamp.txt',
},
'GL_EXT_texture_mirror_clamp_to_edge' : {
'esnumber' : 291,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_mirror_clamp_to_edge.txt',
},
'GL_EXT_texture_norm16' : {
'esnumber' : 207,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_norm16.txt',
},
'GL_EXT_texture_object' : {
'number' : 20,
'flags' : { 'public' },
'supporters' : { 'IBM', 'INGR', 'KGC', 'SGI' },
'url' : 'extensions/EXT/EXT_texture_object.txt',
},
'GL_EXT_texture_perturb_normal' : {
'number' : 147,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_perturb_normal.txt',
},
'GL_EXT_texture_rg' : {
'esnumber' : 103,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_rg.txt',
},
'GL_EXT_texture_sRGB' : {
'number' : 315,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_sRGB.txt',
},
'GL_EXT_texture_sRGB_R8' : {
'esnumber' : 221,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_sRGB_R8.txt',
},
'GL_EXT_texture_sRGB_RG8' : {
'esnumber' : 223,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_sRGB_RG8.txt',
},
'GL_EXT_texture_sRGB_decode' : {
'number' : 402,
'esnumber' : 152,
'flags' : { 'public' },
'supporters' : { 'APPLE', 'CodeWeavers', 'NVIDIA', 'TransGaming' },
'url' : 'extensions/EXT/EXT_texture_sRGB_decode.txt',
},
'GL_EXT_texture_shared_exponent' : {
'number' : 333,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_shared_exponent.txt',
},
'GL_EXT_texture_snorm' : {
'number' : 365,
'flags' : { 'public' },
'supporters' : { 'NVIDIA', 'TransGaming' },
'url' : 'extensions/EXT/EXT_texture_snorm.txt',
},
'GL_EXT_texture_storage' : {
'esnumber' : 108,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_storage.txt',
},
'GL_EXT_texture_swizzle' : {
'number' : 356,
'flags' : { 'public' },
'supporters' : { 'IdSoftware', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_texture_swizzle.txt',
},
'GL_EXT_texture_type_2_10_10_10_REV' : {
'esnumber' : 42,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_type_2_10_10_10_REV.txt',
},
'GL_EXT_texture_view' : {
'esnumber' : 185,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_texture_view.txt',
},
'GL_EXT_timer_query' : {
'number' : 319,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_timer_query.txt',
},
'GL_EXT_transform_feedback' : {
'number' : 352,
'flags' : { 'public' },
'supporters' : { 'APPLE', 'NVIDIA' },
'url' : 'extensions/EXT/EXT_transform_feedback.txt',
},
'GL_EXT_transform_feedback2' : {
'flags' : { 'incomplete', 'obsolete' },
'url' : 'extensions/EXT/EXT_transform_feedback2.txt',
'comments' : 'Draft extension which is referred to by some other vendor extensions, but shipped as ARB_transform_feedback2.',
},
'GL_EXT_unpack_subimage' : {
'esnumber' : 90,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_unpack_subimage.txt',
},
'GL_EXT_vertex_array' : {
'number' : 30,
'flags' : { 'public' },
'supporters' : { 'DEC', 'HP', 'IBM', 'INGR', 'KGC', 'SGI' },
'url' : 'extensions/EXT/EXT_vertex_array.txt',
},
'GL_EXT_vertex_array_bgra' : {
'number' : 354,
'flags' : { 'public' },
'supporters' : { 'Blizzard', 'NVIDIA', 'S3', 'TransGaming' },
'url' : 'extensions/EXT/EXT_vertex_array_bgra.txt',
},
'GL_EXT_vertex_array_set' : {
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/EXT/EXT_vertex_array_set.txt',
},
'GL_EXT_vertex_array_setXXX' : {
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/EXT/EXT_vertex_array_setXXX.txt',
},
'GL_EXT_vertex_attrib_64bit' : {
'number' : 387,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_vertex_attrib_64bit.txt',
},
'GL_EXT_vertex_shader' : {
'number' : 248,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_vertex_shader.txt',
},
'GL_EXT_vertex_weighting' : {
'number' : 188,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_vertex_weighting.txt',
},
'GLX_EXT_visual_info' : {
'number' : 28,
'flags' : { 'public' },
'supporters' : { 'IBM', 'KGC', 'SGI' },
'url' : 'extensions/EXT/GLX_EXT_visual_info.txt',
},
'GLX_EXT_visual_rating' : {
'number' : 44,
'flags' : { 'public' },
'supporters' : { 'HP', 'IBM', 'SGI' },
'url' : 'extensions/EXT/GLX_EXT_visual_rating.txt',
},
'GL_EXT_win32_keyed_mutex' : {
'number' : 506,
'esnumber' : 283,
'flags' : { 'public' },
'url' : 'extensions/EXT/EXT_win32_keyed_mutex.txt',
},
'GL_EXT_window_rectangles' : {
'number' : 490,
'esnumber' : 263,
'flags' : { 'public' },
'supporters' : { 'GOOGLE', 'NVIDIA', 'VMware' },
'url' : 'extensions/EXT/EXT_window_rectangles.txt',
},
'GL_EXT_x11_sync_object' : {
'number' : 406,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/EXT_x11_sync_object.txt',
},
'GL_FJ_shader_binary_GCCSO' : {
'esnumber' : 114,
'flags' : { 'public' },
'url' : 'extensions/FJ/FJ_shader_binary_GCCSO.txt',
},
'GL_GREMEDY_frame_terminator' : {
'number' : 345,
'flags' : { 'public' },
'supporters' : { 'GREMEDY' },
'url' : 'extensions/GREMEDY/GREMEDY_frame_terminator.txt',
},
'GL_GREMEDY_string_marker' : {
'number' : 311,
'flags' : { 'public' },
'supporters' : { 'GREMEDY' },
'url' : 'extensions/GREMEDY/GREMEDY_string_marker.txt',
},
'GL_HP_convolution_border_modes' : {
'number' : 67,
'flags' : { 'public' },
'supporters' : { 'HP' },
'url' : 'extensions/HP/HP_convolution_border_modes.txt',
},
'GL_HP_image_transform' : {
'number' : 66,
'flags' : { 'public' },
'supporters' : { 'HP', 'SUN' },
'url' : 'extensions/HP/HP_image_transform.txt',
},
'GL_HP_occlusion_test' : {
'number' : 137,
'flags' : { 'public' },
'supporters' : { 'HP' },
'url' : 'extensions/HP/HP_occlusion_test.txt',
},
'GL_HP_texture_lighting' : {
'number' : 111,
'flags' : { 'public' },
'supporters' : { 'HP' },
'url' : 'extensions/HP/HP_texture_lighting.txt',
},
'GL_IBM_cull_vertex' : {
'number' : 199,
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/IBM/IBM_cull_vertex.txt',
},
'GL_IBM_multimode_draw_arrays' : {
'number' : 200,
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/IBM/IBM_multimode_draw_arrays.txt',
},
'GL_IBM_rasterpos_clip' : {
'number' : 110,
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/IBM/IBM_rasterpos_clip.txt',
},
'GL_IBM_static_data' : {
'number' : 223,
'flags' : { 'public' },
'url' : 'extensions/IBM/IBM_static_data.txt',
},
'GL_IBM_texture_mirrored_repeat' : {
'number' : 224,
'flags' : { 'public' },
'url' : 'extensions/IBM/IBM_texture_mirrored_repeat.txt',
},
'GL_IBM_vertex_array_lists' : {
'number' : 201,
'flags' : { 'public' },
'supporters' : { 'IBM' },
'url' : 'extensions/IBM/IBM_vertex_array_lists.txt',
},
'GL_IGLOO_swap_triangle_strip_vertex_pointerXXX' : {
'flags' : { 'incomplete', 'obsolete' },
'url' : 'extensions/IGLOO/IGLOO_swap_triangle_strip_vertex_pointerXXX.txt',
},
'GL_IGLOO_toggle_color_and_lightXXX' : {
'flags' : { 'incomplete', 'obsolete' },
'url' : 'extensions/IGLOO/IGLOO_toggle_color_and_lightXXX.txt',
},
'GL_IGLOO_viewport_offsetXXX' : {
'flags' : { 'incomplete', 'obsolete' },
'url' : 'extensions/IGLOO/IGLOO_viewport_offsetXXX.txt',
},
'GL_IMG_bindless_texture' : {
'esnumber' : 270,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_bindless_texture.txt',
},
'GL_IMG_framebuffer_downsample' : {
'esnumber' : 255,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_framebuffer_downsample.txt',
},
'GL_IMG_multisampled_render_to_texture' : {
'esnumber' : 74,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_multisampled_render_to_texture.txt',
},
'GL_IMG_program_binary' : {
'esnumber' : 67,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_program_binary.txt',
},
'GL_IMG_read_format' : {
'esnumber' : 53,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_read_format.txt',
},
'GL_IMG_shader_binary' : {
'esnumber' : 68,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_shader_binary.txt',
},
'GL_IMG_texture_compression_pvrtc' : {
'esnumber' : 54,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_texture_compression_pvrtc.txt',
},
'GL_IMG_texture_compression_pvrtc2' : {
'esnumber' : 140,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_texture_compression_pvrtc2.txt',
},
'GL_IMG_texture_env_enhanced_fixed_function' : {
'esnumber' : 58,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_texture_env_enhanced_fixed_function.txt',
},
'GL_IMG_texture_filter_cubic' : {
'esnumber' : 251,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_texture_filter_cubic.txt',
},
'GL_IMG_user_clip_plane' : {
'esnumber' : 57,
'flags' : { 'public' },
'url' : 'extensions/IMG/IMG_user_clip_plane.txt',
},
'GL_INGR_color_clamp' : {
'number' : 174,
'flags' : { 'public' },
'supporters' : { 'INGR' },
'url' : 'extensions/INGR/INGR_color_clamp.txt',
},
'GL_INGR_interlace_read' : {
'number' : 175,
'flags' : { 'public' },
'supporters' : { 'INGR' },
'url' : 'extensions/INGR/INGR_interlace_read.txt',
},
'GL_INTEL_conservative_rasterization' : {
'number' : 491,
'esnumber' : 265,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/INTEL/INTEL_conservative_rasterization.txt',
},
'GL_INTEL_fragment_shader_ordering' : {
'number' : 441,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/INTEL/INTEL_fragment_shader_ordering.txt',
},
'GL_INTEL_framebuffer_CMAA' : {
'number' : 481,
'esnumber' : 246,
'flags' : { 'public' },
'url' : 'extensions/INTEL/INTEL_framebuffer_CMAA.txt',
},
'GL_INTEL_map_texture' : {
'number' : 429,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/INTEL/INTEL_map_texture.txt',
},
'GL_INTEL_blackhole_render' : {
'number' : 521,
'esnumber' : 300,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/INTEL/INTEL_blackhole_render.txt',
},
'GL_INTEL_parallel_arrays' : {
'number' : 136,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/INTEL/INTEL_parallel_arrays.txt',
},
'GL_INTEL_performance_query' : {
'number' : 443,
'esnumber' : 164,
'flags' : { 'public' },
'url' : 'extensions/INTEL/INTEL_performance_query.txt',
},
'GLX_INTEL_swap_event' : {
'number' : 384,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/INTEL/GLX_INTEL_swap_event.txt',
},
'GL_INTEL_texture_scissor' : {
'number' : 135,
'flags' : { 'public' },
'supporters' : { 'INTEL' },
'url' : 'extensions/INTEL/INTEL_texture_scissor.txt',
},
'GL_KHR_blend_equation_advanced' : {
'arbnumber' : 174,
'esnumber' : 168,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_blend_equation_advanced.txt',
'alias' : { 'GL_KHR_blend_equation_advanced_coherent' },
},
'GL_KHR_context_flush_control' : {
'arbnumber' : 168,
'esnumber' : 191,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_context_flush_control.txt',
'alias' : { 'GLX_ARB_context_flush_control', 'WGL_ARB_context_flush_control' },
},
'GL_KHR_debug' : {
'arbnumber' : 119,
'esnumber' : 118,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_debug.txt',
},
'GL_KHR_no_error' : {
'arbnumber' : 175,
'esnumber' : 243,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_no_error.txt',
},
'GL_KHR_parallel_shader_compile' : {
'arbnumber' : 192,
'esnumber' : 288,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_parallel_shader_compile.txt',
},
'GL_KHR_robust_buffer_access_behavior' : {
'arbnumber' : 169,
'esnumber' : 189,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_robust_buffer_access_behavior.txt',
},
'GL_KHR_robustness' : {
'arbnumber' : 170,
'esnumber' : 190,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_robustness.txt',
},
'GL_KHR_texture_compression_astc_hdr' : {
'arbnumber' : 118,
'esnumber' : 117,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_texture_compression_astc_hdr.txt',
'alias' : { 'GL_KHR_texture_compression_astc_ldr' },
},
'GL_KHR_texture_compression_astc_sliced_3d' : {
'arbnumber' : 189,
'esnumber' : 249,
'flags' : { 'public' },
'url' : 'extensions/KHR/KHR_texture_compression_astc_sliced_3d.txt',
},
'GL_MESAX_texture_stack' : {
'number' : 318,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESAX/MESAX_texture_stack.txt',
},
'GLX_MESA_agp_offset' : {
'number' : 308,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/GLX_MESA_agp_offset.txt',
},
'GLX_MESA_copy_sub_buffer' : {
'number' : 215,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/GLX_MESA_copy_sub_buffer.txt',
},
'GL_MESA_pack_invert' : {
'number' : 300,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_pack_invert.txt',
},
'GLX_MESA_pixmap_colormap' : {
'number' : 216,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/GLX_MESA_pixmap_colormap.txt',
},
'GL_MESA_program_binary_formats' : {
'number' : 516,
'esnumber' : 294,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_program_binary_formats.txt',
},
'GLX_MESA_query_renderer' : {
'number' : 446,
'flags' : { 'public' },
'url' : 'extensions/MESA/GLX_MESA_query_renderer.txt',
},
'GLX_MESA_release_buffers' : {
'number' : 217,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/GLX_MESA_release_buffers.txt',
},
'GL_MESA_resize_buffers' : {
'number' : 196,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_resize_buffers.txt',
},
'GLX_MESA_set_3dfx_mode' : {
'number' : 218,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/GLX_MESA_set_3dfx_mode.txt',
},
'GL_MESA_shader_integer_functions' : {
'number' : 495,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_shader_integer_functions.txt',
},
'GLX_MESA_swap_control' : {
'number' : 514,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/GLX_MESA_swap_control.txt',
},
'GL_MESA_tile_raster_order' : {
'number' : 515,
'esnumber' : 292,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_tile_raster_order.txt',
},
'GL_MESA_window_pos' : {
'number' : 197,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_window_pos.txt',
},
'GL_MESA_ycbcr_texture' : {
'number' : 301,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/MESA/MESA_ycbcr_texture.txt',
},
'GL_MTK_program_binary' : {
'esnumber' : 245,
'flags' : { 'incomplete', 'private' },
'url' : 'drafts/MTK/MTK_program_binary.txt',
},
'GL_MTK_shader_binary' : {
'esnumber' : 244,
'flags' : { 'incomplete', 'private' },
'url' : 'drafts/MTK/MTK_shader_binary.txt',
},
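# Illustrative sketch (assumption: the enclosing dict is bound to a name such
# as 'registry'; that binding is not shown in this excerpt). The 'flags' sets
# support filtering, e.g. listing shipping ES extensions while skipping
# 'private' drafts like the MTK entries above:
#
#     public_es = sorted(name for name, ext in registry.items()
#                        if 'public' in ext['flags'] and 'esnumber' in ext)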
'GL_NVX_blend_equation_advanced_multi_draw_buffers' : {
'number' : 492,
'esnumber' : 266,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NVX/NVX_blend_equation_advanced_multi_draw_buffers.txt',
},
'GL_NVX_conditional_render' : {
'number' : 425,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NVX/NVX_conditional_render.txt',
},
'GL_NVX_gpu_memory_info' : {
'number' : 438,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NVX/NVX_gpu_memory_info.txt',
},
'GL_NVX_linked_gpu_multicast' : {
'number' : 493,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NVX/NVX_linked_gpu_multicast.txt',
},
'GL_NV_3dvision_settings' : {
'esnumber' : 129,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_3dvision_settings.txt',
},
'GL_NV_EGL_stream_consumer_external' : {
'esnumber' : 104,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_EGL_stream_consumer_external.txt',
},
'GL_NV_alpha_to_coverage_dither_control' : {
'number' : 500,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_alpha_to_coverage_dither_control.txt',
},
'GL_NV_bgr' : {
'esnumber' : 135,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_bgr.txt',
},
'GL_NV_bindless_multi_draw_indirect' : {
'number' : 432,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_bindless_multi_draw_indirect.txt',
},
'GL_NV_bindless_multi_draw_indirect_count' : {
'number' : 456,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_bindless_multi_draw_indirect_count.txt',
},
'GL_NV_bindless_texture' : {
'number' : 418,
'esnumber' : 197,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_bindless_texture.txt',
},
'GL_NV_blend_equation_advanced' : {
'number' : 433,
'esnumber' : 163,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_blend_equation_advanced.txt',
'alias' : { 'GL_NV_blend_equation_advanced_coherent' },
},
'GL_NV_blend_minmax_factor' : {
'number' : 510,
'esnumber' : 285,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_blend_minmax_factor.txt',
},
'GL_NV_blend_square' : {
'number' : 194,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_blend_square.txt',
},
'GL_NV_clip_space_w_scaling' : {
'number' : 486,
'esnumber' : 295,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_clip_space_w_scaling.txt',
},
'GL_NV_command_list' : {
'number' : 477,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_command_list.txt',
},
'GL_NV_compute_program5' : {
'number' : 421,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_compute_program5.txt',
},
'GL_NV_conditional_render' : {
'number' : 346,
'esnumber' : 198,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_conditional_render.txt',
},
'GL_NV_conservative_raster' : {
'number' : 465,
'esnumber' : 228,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_conservative_raster.txt',
},
'GL_NV_conservative_raster_dilate' : {
'number' : 480,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_conservative_raster_dilate.txt',
},
'GL_NV_conservative_raster_pre_snap' : {
'number' : 517,
'esnumber' : 297,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_conservative_raster_pre_snap.txt',
},
'GL_NV_conservative_raster_pre_snap_triangles' : {
'number' : 487,
'esnumber' : 262,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_conservative_raster_pre_snap_triangles.txt',
},
'GL_NV_conservative_raster_underestimation' : {
'number' : 518,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_conservative_raster_underestimation.txt',
},
'GLX_NV_copy_buffer' : {
'number' : 457,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/GLX_NV_copy_buffer.txt',
},
'GL_NV_copy_buffer' : {
'esnumber' : 158,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_copy_buffer.txt',
},
'GL_NV_copy_depth_to_color' : {
'number' : 243,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_copy_depth_to_color.txt',
},
'GL_NV_copy_image' : {
'number' : 376,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_copy_image.txt',
'alias' : { 'GLX_NV_copy_image', 'WGL_NV_copy_image' },
},
'GL_NV_coverage_sample' : {
'esnumber' : 72,
'flags' : { 'public' },
'url' : '../EGL/extensions/NV/EGL_NV_coverage_sample.txt',
},
'GL_NV_deep_texture3D' : {
'number' : 424,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_deep_texture3D.txt',
},
'GLX_NV_delay_before_swap' : {
'number' : 445,
'flags' : { 'public' },
'url' : 'extensions/NV/GLX_NV_delay_before_swap.txt',
},
'GL_NV_depth_buffer_float' : {
'number' : 334,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_depth_buffer_float.txt',
},
'GL_NV_depth_clamp' : {
'number' : 260,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_depth_clamp.txt',
},
'GL_NV_depth_nonlinear' : {
'esnumber' : 73,
'flags' : { 'public' },
'url' : '../EGL/extensions/NV/EGL_NV_depth_nonlinear.txt',
},
'GL_NV_draw_buffers' : {
'esnumber' : 91,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_draw_buffers.txt',
},
'GL_NV_draw_instanced' : {
'esnumber' : 141,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_draw_instanced.txt',
},
'GL_NV_draw_texture' : {
'number' : 430,
'esnumber' : 126,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_draw_texture.txt',
},
'GL_NV_draw_vulkan_image' : {
'number' : 501,
'esnumber' : 274,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_draw_vulkan_image.txt',
},
'GL_NV_evaluators' : {
'number' : 225,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_evaluators.txt',
},
'GL_NV_explicit_attrib_location' : {
'esnumber' : 159,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_explicit_attrib_location.txt',
},
'GL_NV_explicit_multisample' : {
'number' : 357,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_explicit_multisample.txt',
},
'GL_NV_fbo_color_attachments' : {
'esnumber' : 92,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_fbo_color_attachments.txt',
},
'GL_NV_fence' : {
'number' : 222,
'esnumber' : 52,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_fence.txt',
},
'GL_NV_fill_rectangle' : {
'number' : 466,
'esnumber' : 232,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_fill_rectangle.txt',
},
'GL_NV_float_buffer' : {
'number' : 281,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_float_buffer.txt',
'alias' : { 'WGL_NV_float_buffer' },
},
'GL_NV_fog_distance' : {
'number' : 192,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_fog_distance.txt',
},
'GL_NV_fragment_coverage_to_color' : {
'number' : 467,
'esnumber' : 229,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_fragment_coverage_to_color.txt',
},
'GL_NV_fragment_program' : {
'number' : 282,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_fragment_program.txt',
},
'GL_NV_fragment_program2' : {
'number' : 304,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_fragment_program2.txt',
},
'GL_NV_fragment_program4' : {
'number' : 335,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_fragment_program4.txt',
},
'GL_NV_fragment_program_option' : {
'number' : 303,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_fragment_program_option.txt',
},
'GL_NV_fragment_shader_interlock' : {
'number' : 468,
'esnumber' : 230,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_fragment_shader_interlock.txt',
},
'GL_NV_framebuffer_blit' : {
'esnumber' : 142,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_framebuffer_blit.txt',
},
'GL_NV_framebuffer_mixed_samples' : {
'number' : 469,
'esnumber' : 231,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_framebuffer_mixed_samples.txt',
},
'GL_NV_framebuffer_multisample' : {
'esnumber' : 143,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_framebuffer_multisample.txt',
},
'GL_NV_framebuffer_multisample_coverage' : {
'number' : 336,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_framebuffer_multisample_coverage.txt',
},
'GL_NV_generate_mipmap_sRGB' : {
'esnumber' : 144,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_generate_mipmap_sRGB.txt',
},
'GL_NV_geometry_program4' : {
'number' : 323,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_geometry_program4.txt',
},
'GL_NV_geometry_shader4' : {
'number' : 338,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_geometry_shader4.txt',
},
'GL_NV_geometry_shader_passthrough' : {
'number' : 470,
'esnumber' : 233,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_geometry_shader_passthrough.txt',
},
'GL_NV_gpu_multicast' : {
'number' : 494,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_gpu_multicast.txt',
},
'GL_NV_gpu_program4' : {
'number' : 322,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_gpu_program4.txt',
},
'GL_NV_gpu_program5' : {
'number' : 388,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_gpu_program5.txt',
},
'GL_NV_gpu_program5_mem_extended' : {
'number' : 434,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_gpu_program5_mem_extended.txt',
},
'GL_NV_gpu_shader5' : {
'number' : 389,
'esnumber' : 260,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_gpu_shader5.txt',
},
'GL_NV_half_float' : {
'number' : 283,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_half_float.txt',
},
'GL_NV_image_formats' : {
'esnumber' : 200,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_image_formats.txt',
},
'GL_NV_instanced_arrays' : {
'esnumber' : 145,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_instanced_arrays.txt',
},
'GL_NV_internalformat_sample_query' : {
'number' : 475,
'esnumber' : 196,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_internalformat_sample_query.txt',
},
'GL_NV_light_max_exponent' : {
'number' : 189,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_light_max_exponent.txt',
},
'GL_NV_multisample_coverage' : {
'number' : 393,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_multisample_coverage.txt',
},
'GL_NV_multisample_filter_hint' : {
'number' : 259,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_multisample_filter_hint.txt',
},
'GL_NV_non_square_matrices' : {
'esnumber' : 160,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_non_square_matrices.txt',
},
'GL_NV_occlusion_query' : {
'number' : 261,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_occlusion_query.txt',
},
'GL_NV_pack_subimage' : {
'esnumber' : 132,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_pack_subimage.txt',
},
'GL_NV_packed_depth_stencil' : {
'number' : 226,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_packed_depth_stencil.txt',
},
'GL_NV_packed_float' : {
'esnumber' : 127,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_packed_float.txt',
},
'GL_NV_parameter_buffer_object' : {
'number' : 339,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_parameter_buffer_object.txt',
},
'GL_NV_parameter_buffer_object2' : {
'number' : 378,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_parameter_buffer_object2.txt',
},
'GL_NV_path_rendering' : {
'number' : 410,
'esnumber' : 199,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_path_rendering.txt',
},
'GL_NV_path_rendering_shared_edge' : {
'number' : 471,
'esnumber' : 234,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_path_rendering_shared_edge.txt',
},
'GL_NV_pixel_buffer_object' : {
'esnumber' : 134,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_pixel_buffer_object.txt',
},
'GL_NV_pixel_data_range' : {
'number' : 284,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_pixel_data_range.txt',
},
'GL_NV_platform_binary' : {
'esnumber' : 131,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_platform_binary.txt',
},
'GL_NV_point_sprite' : {
'number' : 262,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_point_sprite.txt',
},
'GL_NV_polygon_mode' : {
'esnumber' : 238,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_polygon_mode.txt',
},
'GL_NV_present_video' : {
'number' : 347,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_present_video.txt',
'alias' : { 'GLX_NV_present_video', 'WGL_NV_present_video' },
},
'GL_NV_primitive_restart' : {
'number' : 285,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_primitive_restart.txt',
},
'GL_NV_query_resource' : {
'number' : 511,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_query_resource.txt',
},
'GL_NV_query_resource_tag' : {
'number' : 512,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_query_resource_tag.txt',
},
'GL_NV_read_buffer' : {
'esnumber' : 93,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_read_buffer.txt',
},
'GL_NV_read_depth_stencil' : {
'esnumber' : 94,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_read_depth_stencil.txt',
},
'GL_NV_register_combiners' : {
'number' : 191,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_register_combiners.txt',
},
'GL_NV_register_combiners2' : {
'number' : 227,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_register_combiners2.txt',
},
'GL_NV_robustness_video_memory_purge' : {
'number' : 484,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_robustness_video_memory_purge.txt',
},
'GL_NV_sRGB_formats' : {
'esnumber' : 148,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_sRGB_formats.txt',
},
'GL_NV_sample_locations' : {
'number' : 472,
'esnumber' : 235,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_sample_locations.txt',
},
'GL_NV_sample_mask_override_coverage' : {
'number' : 473,
'esnumber' : 236,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_sample_mask_override_coverage.txt',
},
'GL_NV_shader_atomic_counters' : {
'number' : 423,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_shader_atomic_counters.txt',
},
'GL_NV_shader_atomic_float' : {
'number' : 419,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_shader_atomic_float.txt',
},
'GL_NV_shader_atomic_float64' : {
'number' : 488,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shader_atomic_float64.txt',
},
'GL_NV_shader_atomic_fp16_vector' : {
'number' : 474,
'esnumber' : 261,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shader_atomic_fp16_vector.txt',
},
'GL_NV_shader_atomic_int64' : {
'number' : 455,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_shader_atomic_int64.txt',
},
'GL_NV_shader_buffer_load' : {
'number' : 379,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_shader_buffer_load.txt',
},
'GL_NV_shader_buffer_store' : {
'number' : 390,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_shader_buffer_store.txt',
},
'GL_NV_shader_noperspective_interpolation' : {
'esnumber' : 201,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shader_noperspective_interpolation.txt',
},
'GL_NV_shader_storage_buffer_object' : {
'number' : 422,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_shader_storage_buffer_object.txt',
},
'GL_NV_shader_thread_group' : {
'number' : 447,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shader_thread_group.txt',
},
'GL_NV_shader_thread_shuffle' : {
'number' : 448,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shader_thread_shuffle.txt',
},
'GL_NV_shadow_samplers_array' : {
'esnumber' : 146,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shadow_samplers_array.txt',
},
'GL_NV_shadow_samplers_cube' : {
'esnumber' : 147,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_shadow_samplers_cube.txt',
},
'GL_NV_stereo_view_rendering' : {
'number' : 489,
'esnumber' : 296,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_stereo_view_rendering.txt',
},
'GLX_NV_swap_group' : {
'number' : 350,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/GLX_NV_swap_group.txt',
},
'GL_NV_tessellation_program5' : {
'number' : 391,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_tessellation_program5.txt',
},
'GL_NV_texgen_emboss' : {
'number' : 193,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texgen_emboss.txt',
},
'GL_NV_texgen_reflection' : {
'number' : 179,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texgen_reflection.txt',
},
'GL_NV_texture_array' : {
'esnumber' : 133,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_texture_array.txt',
},
'GL_NV_texture_barrier' : {
'number' : 381,
'esnumber' : 271,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_barrier.txt',
},
'GL_NV_texture_border_clamp' : {
'esnumber' : 149,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_texture_border_clamp.txt',
},
'GL_NV_texture_compression_latc' : {
'esnumber' : 130,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_texture_compression_latc.txt',
},
'GL_NV_texture_compression_s3tc' : {
'esnumber' : 128,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_texture_compression_s3tc.txt',
},
'GL_NV_texture_compression_s3tc_update' : {
'esnumber' : 95,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_texture_compression_s3tc_update.txt',
},
'GL_NV_texture_compression_vtc' : {
'number' : 228,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_compression_vtc.txt',
},
'GL_NV_texture_env_combine4' : {
'number' : 195,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_env_combine4.txt',
},
'GL_NV_texture_expand_normal' : {
'number' : 286,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_expand_normal.txt',
},
'GL_NV_texture_multisample' : {
'number' : 403,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_multisample.txt',
},
'GL_NV_texture_npot_2D_mipmap' : {
'esnumber' : 96,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_texture_npot_2D_mipmap.txt',
},
'GL_NV_texture_rectangle' : {
'number' : 229,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_rectangle.txt',
},
'GL_NV_texture_rectangle_compressed' : {
'number' : 509,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_rectangle_compressed.txt',
},
'GL_NV_texture_shader' : {
'number' : 230,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_shader.txt',
},
'GL_NV_texture_shader2' : {
'number' : 231,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_shader2.txt',
},
'GL_NV_texture_shader3' : {
'number' : 265,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_texture_shader3.txt',
},
'GL_NV_transform_feedback' : {
'number' : 341,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_transform_feedback.txt',
},
'GL_NV_transform_feedback2' : {
'number' : 358,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_transform_feedback2.txt',
},
'GL_NV_uniform_buffer_unified_memory' : {
'number' : 459,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_uniform_buffer_unified_memory.txt',
},
'GL_NV_vdpau_interop' : {
'number' : 396,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vdpau_interop.txt',
},
'GL_NV_vertex_array_range' : {
'number' : 190,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_array_range.txt',
},
'GL_NV_vertex_array_range2' : {
'number' : 232,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_array_range2.txt',
},
'GL_NV_vertex_attrib_integer_64bit' : {
'number' : 392,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_attrib_integer_64bit.txt',
},
'GL_NV_vertex_buffer_unified_memory' : {
'number' : 380,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_buffer_unified_memory.txt',
},
'GL_NV_vertex_program' : {
'number' : 233,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_program.txt',
},
'GL_NV_vertex_program1_1' : {
'number' : 266,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_program1_1.txt',
},
'GL_NV_vertex_program2' : {
'number' : 287,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_program2.txt',
},
'GL_NV_vertex_program2_option' : {
'number' : 305,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_program2_option.txt',
},
'GL_NV_vertex_program3' : {
'number' : 306,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_program3.txt',
},
'GL_NV_vertex_program4' : {
'number' : 325,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_vertex_program4.txt',
},
'GL_NV_video_capture' : {
'number' : 374,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/NV_video_capture.txt',
'alias' : { 'GLX_NV_video_capture', 'WGL_NV_video_capture' },
},
'GLX_NV_video_out' : {
'number' : 348,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/GLX_NV_video_out.txt',
},
'GL_NV_viewport_array' : {
'esnumber' : 202,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_viewport_array.txt',
},
'GL_NV_viewport_array2' : {
'number' : 476,
'esnumber' : 237,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_viewport_array2.txt',
},
'GL_NV_viewport_swizzle' : {
'number' : 483,
'esnumber' : 258,
'flags' : { 'public' },
'url' : 'extensions/NV/NV_viewport_swizzle.txt',
},
'GL_OES_EGL_image' : {
'esnumber' : 23,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_EGL_image.txt',
},
'GL_OES_EGL_image_external' : {
'esnumber' : 87,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_EGL_image_external.txt',
},
'GL_OES_EGL_image_external_essl3' : {
'esnumber' : 220,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_EGL_image_external_essl3.txt',
},
'GL_OES_EGL_sync' : {
'esnumber' : 75,
'flags' : { 'public' },
'url' : '../EGL/extensions/KHR/EGL_KHR_fence_sync.txt',
},
'GL_OES_blend_equation_separate' : {
'esnumber' : 1,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_blend_equation_separate.txt',
},
'GL_OES_blend_func_separate' : {
'esnumber' : 2,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_blend_func_separate.txt',
},
'GL_OES_blend_subtract' : {
'esnumber' : 3,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_blend_subtract.txt',
},
'GL_OES_byte_coordinates' : {
'number' : 291,
'esnumber' : 4,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OES/OES_byte_coordinates.txt',
},
'GL_OES_compressed_ETC1_RGB8_texture' : {
'esnumber' : 5,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_compressed_ETC1_RGB8_texture.txt',
},
'GL_OES_compressed_paletted_texture' : {
'number' : 294,
'esnumber' : 6,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OES/OES_compressed_paletted_texture.txt',
},
'GL_OES_copy_image' : {
'esnumber' : 208,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_copy_image.txt',
},
'GL_OES_depth24' : {
'esnumber' : 24,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_depth24.txt',
},
'GL_OES_depth32' : {
'esnumber' : 25,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_depth32.txt',
},
'GL_OES_depth_texture' : {
'esnumber' : 43,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_depth_texture.txt',
},
'GL_OES_depth_texture_cube_map' : {
'esnumber' : 136,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_depth_texture_cube_map.txt',
},
'GL_OES_draw_buffers_indexed' : {
'esnumber' : 209,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_draw_buffers_indexed.txt',
},
'GL_OES_draw_elements_base_vertex' : {
'esnumber' : 219,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_draw_elements_base_vertex.txt',
},
'GL_OES_draw_texture' : {
'esnumber' : 7,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_draw_texture.txt',
},
'GL_OES_element_index_uint' : {
'esnumber' : 26,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_element_index_uint.txt',
},
'GL_OES_extended_matrix_palette' : {
'esnumber' : 8,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_extended_matrix_palette.txt',
},
'GL_OES_fbo_render_mipmap' : {
'esnumber' : 27,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_fbo_render_mipmap.txt',
},
'GL_OES_fixed_point' : {
'number' : 292,
'esnumber' : 9,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OES/OES_fixed_point.txt',
},
'GL_OES_fragment_precision_high' : {
'esnumber' : 28,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_fragment_precision_high.txt',
},
'GL_OES_framebuffer_object' : {
'esnumber' : 10,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_framebuffer_object.txt',
},
'GL_OES_geometry_shader' : {
'esnumber' : 210,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_geometry_shader.txt',
},
'GL_OES_get_program_binary' : {
'esnumber' : 47,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_get_program_binary.txt',
},
'GL_OES_gpu_shader5' : {
'esnumber' : 211,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_gpu_shader5.txt',
},
'GL_OES_mapbuffer' : {
'esnumber' : 29,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_mapbuffer.txt',
},
'GL_OES_matrix_get' : {
'esnumber' : 11,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_matrix_get.txt',
},
'GL_OES_matrix_palette' : {
'esnumber' : 12,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_matrix_palette.txt',
},
'GL_OES_packed_depth_stencil' : {
'esnumber' : 44,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_packed_depth_stencil.txt',
},
'GL_OES_paletted_texture' : {
'esnumber' : 13,
'flags' : { 'incomplete', 'private' },
'comments' : 'Draft spec location unknown.',
},
'GL_OES_point_size_array' : {
'esnumber' : 14,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_point_size_array.txt',
},
'GL_OES_point_sprite' : {
'esnumber' : 15,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_point_sprite.txt',
},
'GL_OES_primitive_bounding_box' : {
'esnumber' : 212,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_primitive_bounding_box.txt',
},
'GL_OES_query_matrix' : {
'number' : 296,
'esnumber' : 16,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OES/OES_query_matrix.txt',
},
'GL_OES_read_format' : {
'number' : 295,
'esnumber' : 17,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OES/OES_read_format.txt',
},
'GL_OES_required_internalformat' : {
'esnumber' : 115,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_required_internalformat.txt',
},
'GL_OES_rgb8_rgba8' : {
'esnumber' : 30,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_rgb8_rgba8.txt',
},
'GL_OES_sample_shading' : {
'esnumber' : 169,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_sample_shading.txt',
},
'GL_OES_sample_variables' : {
'esnumber' : 170,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_sample_variables.txt',
},
'GL_OES_shader_image_atomic' : {
'esnumber' : 171,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_shader_image_atomic.txt',
},
'GL_OES_shader_io_blocks' : {
'esnumber' : 213,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_shader_io_blocks.txt',
},
'GL_OES_shader_multisample_interpolation' : {
'esnumber' : 172,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_shader_multisample_interpolation.txt',
},
'GL_OES_single_precision' : {
'number' : 293,
'esnumber' : 18,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OES/OES_single_precision.txt',
},
'GL_OES_standard_derivatives' : {
'esnumber' : 45,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_standard_derivatives.txt',
},
'GL_OES_stencil1' : {
'esnumber' : 31,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_stencil1.txt',
},
'GL_OES_stencil4' : {
'esnumber' : 32,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_stencil4.txt',
},
'GL_OES_stencil8' : {
'esnumber' : 33,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_stencil8.txt',
},
'GL_OES_stencil_wrap' : {
'esnumber' : 19,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_stencil_wrap.txt',
},
'GL_OES_surfaceless_context' : {
'esnumber' : 116,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_surfaceless_context.txt',
},
'GL_OES_tessellation_shader' : {
'esnumber' : 214,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_tessellation_shader.txt',
},
'GL_OES_texture_3D' : {
'esnumber' : 34,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_3D.txt',
},
'GL_OES_texture_border_clamp' : {
'esnumber' : 215,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_border_clamp.txt',
},
'GL_OES_texture_buffer' : {
'esnumber' : 216,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_buffer.txt',
},
'GL_OES_texture_compression_astc' : {
'esnumber' : 162,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_compression_astc.txt',
},
'GL_OES_texture_cube_map' : {
'esnumber' : 20,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_cube_map.txt',
},
'GL_OES_texture_cube_map_array' : {
'esnumber' : 217,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_cube_map_array.txt',
},
'GL_OES_texture_env_crossbar' : {
'esnumber' : 21,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_env_crossbar.txt',
},
'GL_OES_texture_float' : {
'esnumber' : 36,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_float.txt',
'alias' : { 'GL_OES_texture_half_float' },
},
'GL_OES_texture_float_linear' : {
'esnumber' : 35,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_float_linear.txt',
'alias' : { 'GL_OES_texture_half_float_linear' },
},
'GL_OES_texture_mirrored_repeat' : {
'esnumber' : 22,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_mirrored_repeat.txt',
},
'GL_OES_texture_npot' : {
'esnumber' : 37,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_npot.txt',
},
'GL_OES_texture_stencil8' : {
'esnumber' : 173,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_stencil8.txt',
},
'GL_OES_texture_storage_multisample_2d_array' : {
'esnumber' : 174,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_storage_multisample_2d_array.txt',
},
'GL_OES_texture_view' : {
'esnumber' : 218,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_texture_view.txt',
},
'GL_OES_vertex_array_object' : {
'esnumber' : 71,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_vertex_array_object.txt',
},
'GL_OES_vertex_half_float' : {
'esnumber' : 38,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_vertex_half_float.txt',
},
'GL_OES_vertex_type_10_10_10_2' : {
'esnumber' : 46,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_vertex_type_10_10_10_2.txt',
},
'GL_OES_viewport_array' : {
'esnumber' : 267,
'flags' : { 'public' },
'url' : 'extensions/OES/OES_viewport_array.txt',
},
'GL_OML_interlace' : {
'number' : 239,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OML/OML_interlace.txt',
},
'GL_OML_resample' : {
'number' : 241,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OML/OML_resample.txt',
},
'GL_OML_subsample' : {
'number' : 240,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OML/OML_subsample.txt',
},
'GLX_OML_swap_method' : {
'number' : 237,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OML/GLX_OML_swap_method.txt',
},
'GLX_OML_sync_control' : {
'number' : 238,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OML/GLX_OML_sync_control.txt',
},
'GL_OVR_multiview' : {
'number' : 478,
'esnumber' : 241,
'flags' : { 'public' },
'url' : 'extensions/OVR/OVR_multiview.txt',
},
'GL_OVR_multiview2' : {
'number' : 479,
'esnumber' : 242,
'flags' : { 'public' },
'url' : 'extensions/OVR/OVR_multiview2.txt',
},
'GL_OVR_multiview_multisampled_render_to_texture' : {
'esnumber' : 250,
'flags' : { 'public' },
'url' : 'extensions/OVR/OVR_multiview_multisampled_render_to_texture.txt',
},
'GL_PGI_misc_hints' : {
'number' : 77,
'flags' : { 'public' },
'supporters' : { 'TGS' },
'url' : 'extensions/PGI/PGI_misc_hints.txt',
},
'GL_PGI_vertex_hints' : {
'number' : 76,
'flags' : { 'public' },
'supporters' : { 'TGS' },
'url' : 'extensions/PGI/PGI_vertex_hints.txt',
},
'GL_QCOM_alpha_test' : {
'esnumber' : 89,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_alpha_test.txt',
},
'GL_QCOM_binning_control' : {
'esnumber' : 119,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_binning_control.txt',
},
'GL_QCOM_driver_control' : {
'esnumber' : 55,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_driver_control.txt',
},
'GL_QCOM_extended_get' : {
'esnumber' : 62,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_extended_get.txt',
},
'GL_QCOM_extended_get2' : {
'esnumber' : 63,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_extended_get2.txt',
},
'GL_QCOM_performance_monitor_global_mode' : {
'esnumber' : 56,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_performance_monitor_global_mode.txt',
},
'GL_QCOM_tiled_rendering' : {
'esnumber' : 70,
'flags' : { 'public' },
'supporters' : { 'QCOM' },
'url' : 'extensions/QCOM/QCOM_tiled_rendering.txt',
},
'GL_QCOM_writeonly_rendering' : {
'esnumber' : 61,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_writeonly_rendering.txt',
},
'GL_QCOM_framebuffer_foveated' : {
'esnumber' : 273,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_framebuffer_foveated.txt',
},
'GL_QCOM_texture_foveated' : {
'esnumber' : 293,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_texture_foveated.txt',
},
'GL_QCOM_shader_framebuffer_fetch_noncoherent' : {
'esnumber' : 277,
'flags' : { 'public' },
'url' : 'extensions/QCOM/QCOM_shader_framebuffer_fetch_noncoherent.txt',
},
'GL_REND_screen_coordinates' : {
'number' : 155,
'flags' : { 'public' },
'supporters' : { 'REND' },
'url' : 'extensions/REND/REND_screen_coordinates.txt',
},
'GL_S3_s3tc' : {
'number' : 276,
'flags' : { 'public' },
'supporters' : { 'ATI', 'NVIDIA' },
'url' : 'extensions/S3/S3_s3tc.txt',
},
'GLX_SGIS_blended_overlay' : {
'number' : 142,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/GLX_SGIS_blended_overlay.txt',
},
'GL_SGIS_clip_band_hint' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIS/SGIS_clip_band_hint.txt',
},
'GLX_SGIS_color_range' : {
'number' : 115,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/GLX_SGIS_color_range.txt',
'alias' : { 'GL_SGIS_color_range' },
},
'GL_SGIS_detail_texture' : {
'number' : 21,
'flags' : { 'public' },
'supporters' : { 'KGC', 'SGI' },
'url' : 'extensions/SGIS/SGIS_detail_texture.txt',
},
'GL_SGIS_fog_function' : {
'number' : 64,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_fog_function.txt',
},
'GL_SGIS_generate_mipmap' : {
'number' : 32,
'flags' : { 'public' },
'supporters' : { 'HP', 'SGI' },
'url' : 'extensions/SGIS/SGIS_generate_mipmap.txt',
},
'GL_SGIS_line_texgen' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIS/SGIS_line_texgen.txt',
},
'GL_SGIS_multisample' : {
'number' : 25,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_multisample.txt',
'alias' : { 'GLX_SGIS_multisample' },
},
'GL_SGIS_multitexture' : {
'number' : 116,
'flags' : { 'obsolete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_multitexture.txt',
},
'GL_SGIS_pixel_texture' : {
'number' : 15,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_pixel_texture.txt',
},
'GL_SGIS_point_line_texgen' : {
'number' : 213,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_point_line_texgen.txt',
},
'GL_SGIS_shared_multisample' : {
'number' : 143,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_shared_multisample.txt',
'alias' : { 'GLX_SGIS_shared_multisample' },
},
'GL_SGIS_sharpen_texture' : {
'number' : 22,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_sharpen_texture.txt',
},
'GL_SGIS_texture4D' : {
'number' : 16,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_texture4D.txt',
},
'GL_SGIS_texture_border_clamp' : {
'number' : 36,
'flags' : { 'public' },
'supporters' : { 'HP', 'INGR', 'SGI' },
'url' : 'extensions/SGIS/SGIS_texture_border_clamp.txt',
},
'GL_SGIS_texture_color_mask' : {
'number' : 214,
'flags' : { 'incomplete', 'public' },
'url' : 'extensions/SGIS/SGIS_texture_color_mask.txt',
},
'GL_SGIS_texture_edge_clamp' : {
'number' : 35,
'flags' : { 'public' },
'supporters' : { 'HP', 'INGR', 'SGI' },
'url' : 'extensions/SGIS/SGIS_texture_edge_clamp.txt',
},
'GL_SGIS_texture_filter4' : {
'number' : 7,
'flags' : { 'public' },
'supporters' : { 'KGC', 'SGI' },
'url' : 'extensions/SGIS/SGIS_texture_filter4.txt',
},
'GL_SGIS_texture_lod' : {
'number' : 24,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_texture_lod.txt',
},
'GL_SGIS_texture_select' : {
'number' : 51,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIS/SGIS_texture_select.txt',
},
'GL_SGIX_async' : {
'number' : 132,
'flags' : { 'incomplete', 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_async.txt',
},
'GL_SGIX_async_histogram' : {
'number' : 134,
'flags' : { 'incomplete', 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_async_histogram.txt',
},
'GL_SGIX_async_pixel' : {
'number' : 133,
'flags' : { 'incomplete', 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_async_pixel.txt',
},
'GL_SGIX_bali_g_instruments' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_bali_g_instruments.txt',
},
'GL_SGIX_bali_r_instruments' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_bali_r_instruments.txt',
},
'GL_SGIX_bali_timer_instruments' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_bali_timer_instruments.txt',
},
'GL_SGIX_blend_alpha_minmax' : {
'number' : 119,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_blend_alpha_minmax.txt',
},
'GL_SGIX_blend_cadd' : {
'number' : 150,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_blend_cadd.txt',
},
'GL_SGIX_blend_cmultiply' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_blend_cmultiply.txt',
},
'GL_SGIX_calligraphic_fragment' : {
'number' : 82,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_calligraphic_fragment.txt',
},
'GL_SGIX_clipmap' : {
'number' : 33,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_clipmap.txt',
},
'GL_SGIX_color_matrix_accuracy' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_color_matrix_accuracy.txt',
},
'GL_SGIX_color_table_index_mode' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_color_table_index_mode.txt',
},
'GLX_SGIX_color_type' : {
'number' : 89,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_color_type.txt',
'alias' : { 'GL_SGIX_color_type' },
},
'GLX_SGIX_color_typeXXX' : {
'number' : 72,
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/GLX_SGIX_color_typeXXX.txt',
},
'GL_SGIX_complex_polar' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_complex_polar.txt',
},
'GL_SGIX_convolution_accuracy' : {
'number' : 211,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_convolution_accuracy.txt',
},
'GL_SGIX_cube_map' : {
'number' : 130,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_cube_map.txt',
},
'GL_SGIX_cylinder_texgen' : {
'number' : 140,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_cylinder_texgen.txt',
},
'GL_SGIX_datapipe' : {
'number' : 152,
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_datapipe.txt',
},
'GL_SGIX_decimation' : {
'number' : 125,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_decimation.txt',
},
'GL_SGIX_depth_pass_instrument' : {
'number' : 205,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_depth_pass_instrument.txt',
},
'GL_SGIX_depth_texture' : {
'number' : 63,
'flags' : { 'public' },
'supporters' : { 'HP', 'SGI' },
'url' : 'extensions/SGIX/SGIX_depth_texture.txt',
},
'GLX_SGIX_dm_buffer' : {
'number' : 86,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_dm_buffer.txt',
},
'GL_SGIX_dvc' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_dvc.txt',
},
'GLX_SGIX_fbconfig' : {
'number' : 49,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_fbconfig.txt',
},
'GLX_SGIX_fbconfig_float' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/GLX_SGIX_fbconfig_float.txt',
},
'GL_SGIX_flush_raster' : {
'number' : 61,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_flush_raster.txt',
},
'GL_SGIX_fog_blend' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_fog_blend.txt',
},
'GL_SGIX_fog_factor_to_alpha' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_fog_factor_to_alpha.txt',
},
'GL_SGIX_fog_layers' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_fog_layers.txt',
},
'GL_SGIX_fog_offset' : {
'number' : 65,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_fog_offset.txt',
},
'GL_SGIX_fog_patchy' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_fog_patchy.txt',
},
'GL_SGIX_fog_scale' : {
'number' : 161,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_fog_scale.txt',
},
'GL_SGIX_fog_texture' : {
'flags' : { 'public' },
'url' : 'extensions/SGIX/SGIX_fog_texture.txt',
},
'GL_SGIX_fragment_lighting_space' : {
'number' : 118,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_fragment_lighting_space.txt',
},
'GL_SGIX_fragment_specular_lighting' : {
'flags' : { 'incomplete', 'public' },
'url' : 'extensions/SGIX/SGIX_fragment_specular_lighting.txt',
},
'GL_SGIX_fragments_instrument' : {
'number' : 180,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_fragments_instrument.txt',
},
'GL_SGIX_framezoom' : {
'number' : 57,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_framezoom.txt',
},
'GLX_SGIX_hyperpipe' : {
'number' : 307,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_hyperpipe.txt',
},
'GLU_SGIX_icc_compress' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/GLU_SGIX_icc_compress.txt',
},
'GL_SGIX_icc_texture' : {
'number' : 154,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_icc_texture.txt',
},
'GL_SGIX_igloo_interface' : {
'number' : 219,
'flags' : { 'incomplete' },
'supporters' : { 'MESA' },
'url' : 'extensions/SGIX/SGIX_igloo_interface.txt',
},
'GL_SGIX_image_compression' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_image_compression.txt',
},
'GL_SGIX_impact_pixel_texture' : {
'number' : 126,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_impact_pixel_texture.txt',
},
'GL_SGIX_instrument_error' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_instrument_error.txt',
},
'GL_SGIX_instruments' : {
'number' : 55,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_instruments.txt',
},
'GL_SGIX_interlace' : {
'number' : 45,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_interlace.txt',
},
'GL_SGIX_ir_instrument1' : {
'number' : 81,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_ir_instrument1.txt',
},
'GL_SGIX_line_quality_hint' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_line_quality_hint.txt',
},
'GL_SGIX_list_priority' : {
'number' : 80,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_list_priority.txt',
},
'GL_SGIX_mpeg1' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_mpeg1.txt',
},
'GL_SGIX_mpeg2' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_mpeg2.txt',
},
'GL_SGIX_nonlinear_lighting_pervertex' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_nonlinear_lighting_pervertex.txt',
},
'GL_SGIX_nurbs_eval' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_nurbs_eval.txt',
},
'GL_SGIX_occlusion_instrument' : {
'number' : 151,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_occlusion_instrument.txt',
},
'GL_SGIX_packed_6bytes' : {
'number' : 162,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_packed_6bytes.txt',
},
'GLX_SGIX_pbuffer' : {
'number' : 50,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_pbuffer.txt',
},
'GL_SGIX_pixel_texture' : {
'number' : 499,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_pixel_texture.txt',
'comments' : 'Previously shared extension number 15 with SGIS_pixel_texture.',
},
'GL_SGIX_pixel_texture_bits' : {
'number' : 127,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_pixel_texture_bits.txt',
},
'GL_SGIX_pixel_texture_lod' : {
'number' : 128,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_pixel_texture_lod.txt',
},
'GL_SGIX_pixel_tiles' : {
'number' : 46,
'flags' : { 'obsolete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_pixel_tiles.txt',
},
'GL_SGIX_polynomial_ffd' : {
'number' : 59,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_polynomial_ffd.txt',
},
'GL_SGIX_quad_mesh' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_quad_mesh.txt',
},
'GL_SGIX_reference_plane' : {
'number' : 60,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_reference_plane.txt',
},
'GL_SGIX_resample' : {
'number' : 212,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_resample.txt',
},
'GL_SGIX_scalebias_hint' : {
'number' : 236,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_scalebias_hint.txt',
},
'GL_SGIX_shadow' : {
'number' : 34,
'flags' : { 'public' },
'supporters' : { 'HP', 'SGI' },
'url' : 'extensions/SGIX/SGIX_shadow.txt',
},
'GL_SGIX_shadow_ambient' : {
'number' : 90,
'flags' : { 'public' },
'supporters' : { 'HP', 'SGI' },
'url' : 'extensions/SGIX/SGIX_shadow_ambient.txt',
},
'GL_SGIX_slim' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_slim.txt',
},
'GL_SGIX_spotlight_cutoff' : {
'number' : 131,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_spotlight_cutoff.txt',
},
'GL_SGIX_sprite' : {
'number' : 52,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_sprite.txt',
},
'GL_SGIX_subdiv_patch' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_subdiv_patch.txt',
},
'GL_SGIX_subsample' : {
'number' : 202,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_subsample.txt',
},
'GLX_SGIX_swap_barrier' : {
'number' : 92,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_swap_barrier.txt',
},
'GLX_SGIX_swap_group' : {
'number' : 91,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_swap_group.txt',
},
'GL_SGIX_tag_sample_buffer' : {
'number' : 58,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_tag_sample_buffer.txt',
},
'GL_SGIX_texture_add_env' : {
'number' : 69,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_texture_add_env.txt',
},
'GL_SGIX_texture_coordinate_clamp' : {
'number' : 235,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_texture_coordinate_clamp.txt',
},
'GL_SGIX_texture_lod_bias' : {
'number' : 84,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_texture_lod_bias.txt',
},
'GL_SGIX_texture_mipmap_anisotropic' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_texture_mipmap_anisotropic.txt',
},
'GL_SGIX_texture_multi_buffer' : {
'number' : 53,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_texture_multi_buffer.txt',
},
'GL_SGIX_texture_phase' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_texture_phase.txt',
},
'GL_SGIX_texture_range' : {
'number' : 181,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_texture_range.txt',
},
'GL_SGIX_texture_scale_bias' : {
'number' : 56,
'flags' : { 'public' },
'supporters' : { 'HP', 'SGI' },
'url' : 'extensions/SGIX/SGIX_texture_scale_bias.txt',
},
'GL_SGIX_texture_supersample' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_texture_supersample.txt',
},
'GL_SGIX_vector_ops' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/SGIX_vector_ops.txt',
},
'GL_SGIX_vertex_array_object' : {
'flags' : { 'obsolete' },
'url' : 'extensions/SGIX/SGIX_vertex_array_object.txt',
},
'GL_SGIX_vertex_preclip' : {
'number' : 210,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_vertex_preclip.txt',
'alias' : { 'GL_SGIX_vertex_preclip_hint' },
},
'GLX_SGIX_video_resize' : {
'number' : 83,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_video_resize.txt',
},
'GLX_SGIX_video_resize_float' : {
'number' : 184,
'flags' : { 'incomplete', 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_video_resize_float.txt',
},
'GLX_SGIX_video_source' : {
'number' : 43,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_video_source.txt',
},
'GLX_SGIX_visual_select_group' : {
'number' : 234,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/GLX_SGIX_visual_select_group.txt',
},
'GLX_SGIX_wait_group' : {
'flags' : { 'incomplete' },
'url' : 'extensions/SGIX/GLX_SGIX_wait_group.txt',
},
'GL_SGIX_ycrcb' : {
'number' : 101,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_ycrcb.txt',
},
'GL_SGIX_ycrcb_subsample' : {
'number' : 204,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_ycrcb_subsample.txt',
'comments' : 'Supported on Visual Workstation 320 / 540 only.',
},
'GL_SGIX_ycrcba' : {
'number' : 203,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGIX/SGIX_ycrcba.txt',
},
'GL_SGI_color_matrix' : {
'number' : 13,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/SGI_color_matrix.txt',
},
'GL_SGI_color_table' : {
'number' : 14,
'flags' : { 'public' },
'supporters' : { 'HP', 'SGI', 'SUN' },
'url' : 'extensions/SGI/SGI_color_table.txt',
'comments' : 'Partial HP support.',
},
'GL_SGI_complex' : {
'number' : 87,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/SGI_complex.txt',
},
'GL_SGI_complex_type' : {
'number' : 88,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/SGI_complex_type.txt',
},
'GLX_SGI_cushion' : {
'number' : 62,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/GLX_SGI_cushion.txt',
},
'GL_SGI_fft' : {
'number' : 99,
'flags' : { 'incomplete' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/SGI_fft.txt',
},
'GLU_SGI_filter4_parameters' : {
'number' : 85,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/GLU_SGI_filter4_parameters.txt',
},
'GLX_SGI_make_current_read' : {
'number' : 42,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/GLX_SGI_make_current_read.txt',
},
'GLX_SGI_swap_control' : {
'number' : 40,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/GLX_SGI_swap_control.txt',
},
'GL_SGI_texture_color_table' : {
'number' : 17,
'flags' : { 'public' },
'supporters' : { 'ES', 'HP', 'SGI', 'SUN' },
'url' : 'extensions/SGI/SGI_texture_color_table.txt',
},
'GLX_SGI_transparent_pixel' : {
'number' : 153,
'flags' : { 'obsolete' },
'url' : 'extensions/SGI/GLX_SGI_transparent_pixel.txt',
},
'GLX_SGI_video_sync' : {
'number' : 41,
'flags' : { 'public' },
'supporters' : { 'SGI' },
'url' : 'extensions/SGI/GLX_SGI_video_sync.txt',
},
'GL_SUNX_constant_data' : {
'number' : 163,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUNX/SUNX_constant_data.txt',
},
'GL_SUN_convolution_border_modes' : {
'number' : 182,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/SUN_convolution_border_modes.txt',
},
'GLX_SUN_get_transparent_index' : {
'number' : 183,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/GLX_SUN_get_transparent_index.txt',
},
'GL_SUN_global_alpha' : {
'number' : 164,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/SUN_global_alpha.txt',
},
'GL_SUN_mesh_array' : {
'number' : 257,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/SUN_mesh_array.txt',
},
'GL_SUN_slice_accum' : {
'number' : 258,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/SUN_slice_accum.txt',
},
'GL_SUN_triangle_list' : {
'number' : 165,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/SUN_triangle_list.txt',
},
'GL_SUN_vertex' : {
'number' : 166,
'flags' : { 'public' },
'supporters' : { 'SUN' },
'url' : 'extensions/SUN/SUN_vertex.txt',
},
'GL_VIV_shader_binary' : {
'esnumber' : 85,
'flags' : { 'public' },
'url' : 'extensions/VIV/VIV_shader_binary.txt',
},
'WGL_3DL_stereo_control' : {
'number' : 313,
'flags' : { 'public' },
'supporters' : { '3DL' },
'url' : 'extensions/3DL/WGL_3DL_stereo_control.txt',
},
'WGL_AMD_gpu_association' : {
'number' : 361,
'flags' : { 'public' },
'supporters' : { 'AMD' },
'url' : 'extensions/AMD/WGL_AMD_gpu_association.txt',
},
'WGL_ARB_buffer_region' : {
'arbnumber' : 4,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_buffer_region.txt',
},
'WGL_ARB_create_context' : {
'arbnumber' : 55,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_create_context.txt',
'comments' : 'Alias to WGL_ARB_create_context_profile not needed - see arbnumber 74.',
},
'WGL_ARB_create_context_profile' : {
'arbnumber' : 74,
'flags' : { 'public' },
'url' : 'extensions/ARB/WGL_ARB_create_context.txt',
'comments' : 'Included with arbnumber 55, WGL_ARB_create_context.',
},
'WGL_ARB_create_context_robustness' : {
'arbnumber' : 102,
'flags' : { 'public' },
'url' : 'extensions/ARB/WGL_ARB_create_context_robustness.txt',
},
'WGL_ARB_extensions_string' : {
'arbnumber' : 8,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_extensions_string.txt',
},
'WGL_ARB_make_current_read' : {
'arbnumber' : 10,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_make_current_read.txt',
},
'WGL_ARB_pbuffer' : {
'arbnumber' : 11,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_pbuffer.txt',
},
'WGL_ARB_pixel_format' : {
'arbnumber' : 9,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_pixel_format.txt',
},
'WGL_ARB_render_texture' : {
'arbnumber' : 20,
'flags' : { 'public' },
'supporters' : { 'ARB' },
'url' : 'extensions/ARB/WGL_ARB_render_texture.txt',
},
'WGL_ARB_robustness_application_isolation' : {
'arbnumber' : 143,
'flags' : { 'public' },
'url' : 'extensions/ARB/WGL_ARB_robustness_application_isolation.txt',
'alias' : { 'WGL_ARB_robustness_share_group_isolation' },
},
'WGL_ATI_pixel_format_float' : {
'number' : 278,
'flags' : { 'public' },
'supporters' : { 'ATI' },
'url' : 'extensions/ATI/WGL_ATI_pixel_format_float.txt',
},
'WGL_EXT_colorspace' : {
'number' : 498,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/WGL_EXT_colorspace.txt',
},
'WGL_EXT_create_context_es2_profile' : {
'number' : 400,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/WGL_EXT_create_context_es2_profile.txt',
'alias' : { 'WGL_EXT_create_context_es_profile' },
},
'WGL_EXT_depth_float' : {
'number' : 177,
'flags' : { 'public' },
'supporters' : { 'INGR' },
'url' : 'extensions/EXT/WGL_EXT_depth_float.txt',
},
'WGL_EXT_display_color_table' : {
'number' : 167,
'flags' : { 'public' },
'url' : 'extensions/EXT/WGL_EXT_display_color_table.txt',
},
'WGL_EXT_extensions_string' : {
'number' : 168,
'flags' : { 'public' },
'supporters' : { 'INGR', 'SGI' },
'url' : 'extensions/EXT/WGL_EXT_extensions_string.txt',
},
'WGL_EXT_make_current_read' : {
'number' : 169,
'flags' : { 'public' },
'supporters' : { 'INGR', 'SGI' },
'url' : 'extensions/EXT/WGL_EXT_make_current_read.txt',
},
'WGL_EXT_multisample' : {
'number' : 209,
'flags' : { 'public' },
'url' : 'extensions/EXT/WGL_EXT_multisample.txt',
'alias' : { 'GL_EXT_multisample' },
},
'WGL_EXT_pbuffer' : {
'number' : 171,
'flags' : { 'public' },
'supporters' : { 'INGR', 'SGI' },
'url' : 'extensions/EXT/WGL_EXT_pbuffer.txt',
},
'WGL_EXT_pixel_format' : {
'number' : 170,
'flags' : { 'public' },
'supporters' : { 'INGR', 'SGI' },
'url' : 'extensions/EXT/WGL_EXT_pixel_format.txt',
},
'WGL_EXT_swap_control' : {
'number' : 172,
'flags' : { 'public' },
'supporters' : { 'INGR', 'SGI' },
'url' : 'extensions/EXT/WGL_EXT_swap_control.txt',
},
'WGL_EXT_swap_control_tear' : {
'number' : 415,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/EXT/WGL_EXT_swap_control_tear.txt',
},
'GL_EXT_clip_control' : {
'esnumber' : 290,
'flags' : { 'public' },
'supporters' : { 'MESA' },
'url' : 'extensions/EXT/EXT_clip_control.txt',
},
'WGL_I3D_digital_video_control' : {
'number' : 250,
'flags' : { 'public' },
'supporters' : { 'I3D' },
'url' : 'extensions/I3D/WGL_I3D_digital_video_control.txt',
},
'WGL_I3D_gamma' : {
'number' : 251,
'flags' : { 'public' },
'supporters' : { 'I3D' },
'url' : 'extensions/I3D/WGL_I3D_gamma.txt',
},
'WGL_I3D_genlock' : {
'number' : 252,
'flags' : { 'public' },
'supporters' : { 'I3D' },
'url' : 'extensions/I3D/WGL_I3D_genlock.txt',
},
'WGL_I3D_image_buffer' : {
'number' : 253,
'flags' : { 'public' },
'supporters' : { 'I3D' },
'url' : 'extensions/I3D/WGL_I3D_image_buffer.txt',
},
'WGL_I3D_swap_frame_lock' : {
'number' : 254,
'flags' : { 'public' },
'supporters' : { 'I3D' },
'url' : 'extensions/I3D/WGL_I3D_swap_frame_lock.txt',
},
'WGL_I3D_swap_frame_usage' : {
'number' : 255,
'flags' : { 'public' },
'supporters' : { 'I3D' },
'url' : 'extensions/I3D/WGL_I3D_swap_frame_usage.txt',
},
'GL_WIN_phong_shading' : {
'number' : 113,
'flags' : { 'public' },
'supporters' : { 'MS' },
'url' : 'extensions/WIN/WIN_phong_shading.txt',
},
'GL_WIN_scene_markerXXX' : {
'flags' : { 'obsolete' },
'url' : 'extensions/WIN/WIN_scene_markerXXX.txt',
},
'GL_WIN_specular_fog' : {
'number' : 114,
'flags' : { 'public' },
'supporters' : { 'MS' },
'url' : 'extensions/WIN/WIN_specular_fog.txt',
},
'WGL_NV_DX_interop' : {
'number' : 407,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_DX_interop.txt',
},
'WGL_NV_DX_interop2' : {
'number' : 412,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_DX_interop2.txt',
},
'WGL_NV_delay_before_swap' : {
'number' : 436,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_delay_before_swap.txt',
},
'WGL_NV_gpu_affinity' : {
'number' : 355,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_gpu_affinity.txt',
},
'WGL_NV_render_depth_texture' : {
'number' : 263,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_render_depth_texture.txt',
},
'WGL_NV_render_texture_rectangle' : {
'number' : 264,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_render_texture_rectangle.txt',
},
'WGL_NV_swap_group' : {
'number' : 351,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_swap_group.txt',
},
'WGL_NV_video_output' : {
'number' : 349,
'flags' : { 'public' },
'supporters' : { 'NVIDIA' },
'url' : 'extensions/NV/WGL_NV_video_output.txt',
},
'WGL_OML_sync_control' : {
'number' : 242,
'flags' : { 'public' },
'supporters' : { 'KHR' },
'url' : 'extensions/OML/WGL_OML_sync_control.txt',
},
}
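# A minimal usage sketch (not part of the upstream registry module): look up an
# extension record and summarize it. Assumes the big dict above is bound to a
# name such as `registry`; that binding appears earlier in this file.
def describe_extension(registry, name):
    """Return a one-line summary for a registry entry, or None if absent."""
    entry = registry.get(name)
    if entry is None:
        return None
    flags = ','.join(sorted(entry.get('flags', set())))
    return '%s [%s] -> %s' % (name, flags, entry.get('url', 'no spec'))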
| 1.21875 | 1 |
uncertainty_toolbox/data.py | rebeccadavidsson/uncertainty-toolbox | 1 | 12791376 | <filename>uncertainty_toolbox/data.py
"""
Code for importing and generating data.
"""
import numpy as np
def synthetic_arange_random(num_points=10):
"""
Simple dataset of evenly spaced points and identity function (with some
randomization)
"""
y_true = np.arange(num_points)
y_pred = np.arange(num_points) + np.random.random((num_points,))
y_std = np.abs(y_true - y_pred) + 0.1 * np.random.random((num_points,))
return (y_pred, y_std, y_true)
def synthetic_sine_heteroscedastic(n_points=10):
"""
Return samples from "synthetic sine" heteroscedastic noisy function.
"""
bounds = [0, 15]
# x = np.random.uniform(bounds[0], bounds[1], n_points)
x = np.linspace(bounds[0], bounds[1], n_points)
f = np.sin(x)
std = 0.01 + np.abs(x - 5.0) / 10.0
noise = np.random.normal(scale=std)
y = f + noise
return f, std, y, x
def curvy_cosine(x):
"""
Curvy cosine function.
Parameters
----------
x : ndarray
2d numpy ndarray.
"""
flat_neg_cos = np.sum(-1 * np.cos(x), 1) / x.shape[1]
curvy_cos = flat_neg_cos + 0.2 * np.linalg.norm(x, axis=1)
curvy_cos = curvy_cos.reshape(-1, 1)
return curvy_cos
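# Minimal usage sketch (an assumption for illustration, not part of the library
# API): sample the generators above and print the returned pieces.
if __name__ == "__main__":
    f, std, y, x = synthetic_sine_heteroscedastic(n_points=5)
    print("x:", x)
    print("noise std:", std)
    print("noisy y:", y)
    xs = np.random.uniform(-1, 1, size=(4, 2))
    print("curvy_cosine:", curvy_cosine(xs).ravel())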
| 3.125 | 3 |
python/frame.py | ramity/apexcv | 13 | 12791377 | <reponame>ramity/apexcv
import os
import cv2
import numpy as np
class Frame:
path = ""
frameNumber = 1
    image = None  # frame pixel data, set in __init__
cols = 0
rows = 0
# default configured opencv settings
simpleBlurAmount = 25
thresholdType = cv2.THRESH_BINARY
bilateralKernelSize = 9
bilateralSigmaSpace = 9
countourRetrivalMode = cv2.RETR_LIST
contourApproximationMethod = cv2.CHAIN_APPROX_SIMPLE
contourLayers = -1
contourColor = (0, 0, 255)
contourBorderSize = 1
def __init__(self, frameNumber, path, image):
self.frameNumber = frameNumber
self.path = path
self.image = image
self.cols = image.shape[0]
self.rows = image.shape[1]
def getSubRegion(self, x, y, w, h):
x1 = x
x2 = x + w
y1 = y
y2 = y + h
return self.image[y1:y2, x1:x2]
def getGrayscale(self):
return cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY)
def getGrayscaleSubRegion(self, x, y, w, h):
x1 = x
x2 = x + w
y1 = y
y2 = y + h
image = self.image[y1:y2, x1:x2]
return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
def getSimpleBlur(self):
kernel = np.ones((5, 5), np.float32) / self.simpleBlurAmount
return cv2.filter2D(self.image, -1, kernel)
def getSimpleBlurSubRegion(self, x, y, w, h):
x1 = x
x2 = x + w
y1 = y
y2 = y + h
image = self.image[y1:y2, x1:x2]
kernel = np.ones((5, 5), np.float32) / self.simpleBlurAmount
return cv2.filter2D(image, -1, kernel)
    def getThreshold(self, low, high):
        # cv2.threshold returns (retval, image); return only the thresholded image
        _, thresholded = cv2.threshold(self.image, low, high, self.thresholdType)
        return thresholded
    def getThresholdSubRegion(self, low, high, x, y, w, h):
        x1 = x
        x2 = x + w
        y1 = y
        y2 = y + h
        image = self.image[y1:y2, x1:x2]
        # threshold the cropped region using the configured threshold type
        _, thresholded = cv2.threshold(image, low, high, self.thresholdType)
        return thresholded
def getBilateral(self):
return cv2.adaptiveBilateralFilter(self.image, self.bilateralKernelSize, self.bilateralSigmaSpace)
def getBilateralSubRegion(self, x, y, w, h):
x1 = x
x2 = x + w
y1 = y
y2 = y + h
image = self.image[y1:y2, x1:x2]
return cv2.adaptiveBilateralFilter(image, self.bilateralKernelSize, self.bilateralSigmaSpace)
    def getContours(self):
        gray = self.getGrayscale()
        # OpenCV 2.4-style signature assumed: findContours returns (contours, hierarchy)
        contours, hierarchy = cv2.findContours(gray, self.countourRetrivalMode, self.contourApproximationMethod)
        return contours
    def getContoursOverlayImage(self):
        contours = self.getContours()
        if not contours:
            return self.image
        else:
            return cv2.drawContours(self.image, contours, self.contourLayers, self.contourColor, self.contourBorderSize)
    def getContoursSubRegion(self, x, y, w, h):
        gray = self.getGrayscaleSubRegion(x, y, w, h)
        contours, hierarchy = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        return contours
    def getContoursOverlayImageSubRegion(self, x, y, w, h):
        # subregion counterpart of getContoursOverlayImage
        contours = self.getContoursSubRegion(x, y, w, h)
        if not contours:
            return self.getSubRegion(x, y, w, h)
        else:
            return cv2.drawContours(self.getSubRegion(x, y, w, h), contours, self.contourLayers, self.contourColor, self.contourBorderSize)
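# Minimal usage sketch (the file path below is a hypothetical example; the
# Frame API itself is defined above).
if __name__ == "__main__":
    img = cv2.imread("frames/0001.png")
    frame = Frame(1, "frames/0001.png", img)
    blurred = frame.getSimpleBlurSubRegion(0, 0, 100, 100)
    cv2.imwrite("blurred_region.png", blurred)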
| 2.484375 | 2 |
external_tools/src/main/python/images/CopyOnlyFilesSpecifiedInSolr.py | amccoy95/PhenotypeData | 1 | 12791378 | <filename>external_tools/src/main/python/images/CopyOnlyFilesSpecifiedInSolr.py
#!/usr/bin/python
"""this program gets the download_file_paths (http mousephenotype uris) from the experiment core and then downloads the images"""
import os
import requests
import json
import sys
import os.path
import sys
import argparse
import mysql.connector
import shutil
from common import splitString
from database import getDbConnection
from OmeroPropertiesParser import OmeroPropertiesParser
responseFailed=0
numberOfImageDownloadAttemps=0
totalNumberOfImagesWeHave=0
numFoundInSolr=0
uniqueUris=set()
def main(argv):
parser = argparse.ArgumentParser(
description='Get the download_file_paths (http mousephenotype uris) from the experiment core and then downloads the images'
)
parser.add_argument('-d', '--rootDestinationDir', dest='rootDestinationDir',
help='Directory for root of destination to store images' )
parser.add_argument('-s', '--rootSolrUrl', dest='rootSolrUrl',
help='URL to root of solr index'
)
parser.add_argument('-H', '--host', dest='komp2Host',
help='Hostname for server hosting komp2 db'
)
parser.add_argument('-p', '--port', dest='komp2Port',
help='Port by which to connect to komp2 db'
)
parser.add_argument('-u', '--user', dest='komp2User',
help='Username for connecting to komp2 db'
)
parser.add_argument('-db', '--database', dest='komp2Db',
help='Database to connect to for komp2db'
)
parser.add_argument('--pass', dest='komp2Pass',
help='Password for <PASSWORD>'
)
parser.add_argument('--profile', dest='profile', default='dev',
help='profile from which to read config: dev, prod, live, ...')
args = parser.parse_args()
# Get values from property file and use as defaults that can be overridden
# by command line parameters
try:
pp = OmeroPropertiesParser(args.profile)
omeroProps = pp.getOmeroProps()
except:
omeroProps = {}
rootSolrUrl = args.rootSolrUrl if args.rootSolrUrl <> None else omeroProps['solrurl']
komp2Host = args.komp2Host if args.komp2Host<>None else omeroProps['komp2host']
komp2Port = args.komp2Port if args.komp2Port<>None else omeroProps['komp2port']
komp2db = args.komp2Db if args.komp2Db<>None else omeroProps['komp2db']
komp2User = args.komp2User if args.komp2User<>None else omeroProps['komp2user']
komp2Pass = args.komp2Pass if args.komp2Pass<>None else omeroProps['komp2pass']
rootDestinationDir = args.rootDestinationDir if args.rootDestinationDir<>None else omeroProps['rootdestinationdir']
#note cant split this url over a few lines as puts in newlines into url which doesn't work
#solrQuery="""experiment/select?q=observation_type:image_record&fq=download_file_path:(download_file_path:*bhjlk01.jax.org/images/IMPC_ALZ_001/*%20AND%20!download_file_path:*.mov)&fl=id,download_file_path,phenotyping_center,pipeline_stable_id,procedure_stable_id,datasource_name,parameter_stable_id&wt=json&indent=on&rows=10000000"""
solrQuery="""experiment/select?q=observation_type:image_record&fq=(download_file_path:*mousephenotype.org*%20AND%20!download_file_path:*.mov)&fl=id,download_file_path,phenotyping_center,pipeline_stable_id,procedure_stable_id,datasource_name,parameter_stable_id&wt=json&indent=on&rows=10000000"""
print("running python image copy script for impc images")
print 'rootDestinationDir is "', rootDestinationDir
solrUrl=rootSolrUrl+solrQuery;
print 'solrUrl', solrUrl
cnx=getDbConnection(komp2Host, komp2Port, komp2db, komp2User, komp2Pass)
runWithSolrAsDataSource(solrUrl, cnx, rootDestinationDir)
def runWithSolrAsDataSource(solrUrl,cnx, rootDestinationDir):
"""
need to get these passed in as arguments - the host and db name etc for jenkins to run
    first get the list of download urls and the data source, experiment, procedure and parameter and observation id for the images
"""
v = json.loads(requests.get(solrUrl).text)
docs=v['response']['docs']
numFoundInSolr=v['response']['numFound']
for doc in docs:
download_file_path=doc['download_file_path']
datasource_id=doc['datasource_name']
phenotyping_center=doc['phenotyping_center']
#experiment=doc['experiment']
pipeline_stable_id=doc['pipeline_stable_id']
observation_id=doc['id']
procedure_stable_id=doc['procedure_stable_id']
parameter_stable_id=doc['parameter_stable_id']
processFile(cnx, observation_id, rootDestinationDir,phenotyping_center,pipeline_stable_id, procedure_stable_id, parameter_stable_id, download_file_path)
print 'number found in solr='+str(numFoundInSolr)+' number of failed responses='+str(responseFailed)+' number of requests='+str(numberOfImageDownloadAttemps)+' total totalNumberOfImagesWeHave='+str(totalNumberOfImagesWeHave)
cnx.commit()
cnx.close()
def createDestinationFilePath(rootDestinationDir, phenotyping_center, pipeline_stable_id, procedure, parameter, download_file_path):
directory="/".join([rootDestinationDir,phenotyping_center, pipeline_stable_id,procedure,parameter])
return directory
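# Example with illustrative values: createDestinationFilePath(
#     '/nfs/komp2/web/images/impc', 'JAX', 'JAX_001', 'IMPC_XRY_001', 'IMPC_XRY_034_001', 'any')
# returns '/nfs/komp2/web/images/impc/JAX/JAX_001/IMPC_XRY_001/IMPC_XRY_034_001'
# (note: the download_file_path argument is accepted but not used in the join).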
def processFile(cnx, observation_id, rootDestinationDir, phenotyping_center,pipeline_stable_id, procedure, parameter, downloadFilePath):
global totalNumberOfImagesWeHave
global responseFailed
global numberOfImageDownloadAttemps
directory = createDestinationFilePath(rootDestinationDir, phenotyping_center, pipeline_stable_id, procedure,parameter, downloadFilePath)
#print "directory "+str(directory)
dstfilename=directory+"/"+str(downloadFilePath.split('/')[-1])
#print "dstfilename="+str(dstfilename)
destPath=dstfilename.replace("/nfs/komp2/web/images/impc/","/nfs/komp2/web/images/clean/impc/")
#print "replaced="+destPath
#/nfs/komp2/web/images/impc/MRC Harwell/HRWL_001/IMPC_XRY_001/IMPC_XRY_034_001/114182.dcm
# new file paths are /nfs/public/ro/pheno-archive-images/images/impc
if dstfilename in uniqueUris:
print '---------------------!!!!!!!!!!error the filePath is not unique and has been specified before:'+dstfilename
uniqueUris.add(dstfilename)
destDirectory=os.path.dirname(destPath)
#print "destination directory for copy is "+destDirectory
if not os.path.exists(destDirectory):
os.makedirs(destDirectory)
#print 'saving file to '+destPath
if not os.path.isfile(destPath):
try:
shutil.copyfile(dstfilename,destPath)
except IOError:
print "file does not exist "+str(dstfilename)+" continuing"
totalNumberOfImagesWeHave=totalNumberOfImagesWeHave+1
if totalNumberOfImagesWeHave%1000==0 :
print "totalNumber of images we have="+str(totalNumberOfImagesWeHave)
if __name__ == "__main__":
main(sys.argv[1:])
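# Example invocation (host, credentials and paths are illustrative; the flags
# are the ones defined in main() above):
#   python CopyOnlyFilesSpecifiedInSolr.py --profile dev \
#       -s http://localhost:8983/solr/ \
#       -d /nfs/komp2/web/images/impc \
#       -H komp2-host -p 3306 -u komp2user -db komp2 --pass secret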
| 2.609375 | 3 |
tests/test_PerCarRaceStatusData.py | jdamiani27/PyRaceView | 4 | 12791379 | <filename>tests/test_PerCarRaceStatusData.py
import pytest
from pyraceview.messages import MsgRaceStatus
raw = (
b"\xab\xcd*T\x02Cs\x04\x90\x90[3\x10\xcc\x89\xb8V\x00\x00"
b"\x00\x05q\xe0\x03 \x00y\x86\xb9\x00\x00$\x00\x10\x10\x06"
b"\x0e\xe8\x00\x06\x0f\x91k\xe5\x00\x00&\x00\x15\xce\x05\xb9x"
b"\x00\x07\t\x8d^\x8d\x00\x00\x16\x00\x1c\xc2\x05\xcb\xa1\x90"
b"\x05\xdd\x91L\xb3\x00\x00\x04\x00!\xece\xb1\xf9\x90\x07\x9f"
b"\x91@\xe5\x00\x00(\x00%R\x05P\xc8\x00\x05G\x8d2a\x00\x00\x0c"
b"\x00'\xe8\x05d\x01\x90\x07;\x911i\x00\x00\x08\x00+z\x05'!"
b"\x90\x07m\x91&S\x00\x00\x12\x005\x08\x05\x1ba\x90\x07m\x91"
b"\x00\xb5\x00\x00T\x006\xaa\x05\x00\xa8\x00\x07\xd1\x90\xfaC"
b"\x00\x00`\x00<\x1c\x05\xa9q\x90\t/\x8c\xe8g\x00\x00\x1c\x00=>"
b'\x053\xf1\x90\x08\x99\x90\xe2\xd3\x00\x00"\x00?(\x05jh\x00\ta'
b"\x8c\xdb\xcf\x00\x00\x18\x00@\xa6\xe0\x00\x01\x90\x08g\x90\xd6"
b"\xe7\x00\x00\x02\x00A\xec\xe0\x00\x00\x00\x08\x03\x8c\xd3\xc9"
b"\x00\x00R\x00E\xde\x00\x00\x01\x90\ta\x90\xc3\xa3\x00\x00\xbe"
b"\x00If\x00\x00\x00\x00\x08\x99\x8c\xb9\x9f\x00\x00\x14\x00p$"
b"\x00\x00\x01\x90\x08\x99\x8c\x16\xdb\x00\x00D\x00\xbeD\x05\x95!"
b"\x90\x07\x9e~\x04Q\x00\x00,\x00\xc1\xde\x05\xaf\xd9\x90\x084}"
b"\xf7\xd9\x00\x00L\x00\xdc\x88\x04\x8e`\x00\x06@i\x97\x9b\x00"
b"\x000\x10\x00\x02\x05\x8d\xa8\x00\x08\x98~\x15\x0b\x00\x00\xb0"
b"\x10\x00\x02\x05\xda\x91\x90\x08\x98}\xf1\xc7\x00\x00\x06\x10"
b"\x00\x02\x05\xc3\x10\x00\x05\xaa}\xeb\x9b\x00\x00\x1a\x10\x00"
b"\x02\x85\xb4!\x90\t\xc4}\xe0\xa1\x00\x00H\x10\x00\x02\x05\xb9y"
b"\x90\n(}\xd9\xa3\x00\x00@\x10\x00\x02\x05\xb0\xe9\x90\x08\x02m"
b"\xca\xe7\x00\x00*\x10\x00\x02\x06\x1b\xb9\x90\t\x92}\xc0\xc7\x00"
b"\x00\x10\x10\x00\x02\x04\x8b1\x90\x07:}\xaaE\x00\x00\x01\x10\x00"
b"\x02\x04\xa5\xe0\x00\x06@m\x9es\x00\x00^\x10\x00\x04\x06\x02\x18"
b"\x00\t`n\x0e{\x00\x00J\x10\x00\x04\x05\xb8a\x90\t.q\xc6\x9f\x00"
b"\x00f\x10\x00\x06\x04\xfbY\x90\x07:}\xb2\xc3\x00\x00h\x10\x00\x08"
b"\x04\xaeq\x93&\xa4q\xa9\xb9\x00\x00j\x10\x00\n\x05\xb9x\x00\x07"
b"\xd0u\xfcm\x00\x006\x10\x00\x0c\x05\x7f\xc8\x00\t\xc4u\xd0\xb7"
b"\x00\x00\x9a\x10\x00\x0c\x04\x98\x00\x00\x06@u\x8b\xdd\x00\x00"
b"\x1e\x10\x00J\x04\xa5\xe0\x00\x04~\xc5\x92\x1d\x00\x00"
)
@pytest.fixture
def status():
return MsgRaceStatus(raw).car_data[-1]
def test_car_id(status):
assert status.car_id == 30
def test_status(status):
assert status.status == 0
def test_tol_type(status):
assert status.tol_type == 1
def test_time_off_leader(status):
assert status.time_off_leader == 37.0
def test_event(status):
assert status.event == 0
def test_speed(status):
assert status.speed == 38.076
def test_throttle(status):
assert status.throttle == 0
def test_brake(status):
assert status.brake == 0
def test_rpm(status):
assert status.rpm == 2300
def test_fuel(status):
assert status.fuel == 49
def test_steer_angle(status):
assert status.steer_angle == 0
def test_lap_fraction(status):
assert status.lap_fraction == 0.5147
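# These tests are ordinary pytest functions; from the repository root they can
# be run with, e.g.:
#   pytest tests/test_PerCarRaceStatusData.py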
| 1.976563 | 2 |
python_helper/api/src/service/LogHelper.py | SamuelJansen/python_helper | 0 | 12791380 | import colorama, traceback
from python_helper.api.src.domain import Constant as c
from python_helper.api.src.service import SettingHelper, StringHelper, EnvironmentHelper, ObjectHelper, ReflectionHelper
LOG = 'LOG'
INFO = 'INFO'
SUCCESS = 'SUCCESS'
SETTING = 'SETTING'
DEBUG = 'DEBUG'
WARNING = 'WARNING'
WRAPPER = 'WRAPPER'
FAILURE = 'FAILURE'
ERROR = 'ERROR'
TEST = 'TEST'
RESET_ALL_COLORS = colorama.Style.RESET_ALL
from python_helper.api.src.helper import LogHelperHelper
global LOG_HELPER_SETTINGS
# import asyncio
# global OUTPUT_PRINT_LIST
# PRINTING = 'PRINTING'
# def loadLogger() :
# global OUTPUT_PRINT_LIST
# try :
# if ObjectHelper.isNone(OUTPUT_PRINT_LIST) :
# OUTPUT_PRINT_LIST = []
# except Exception as exception :
# OUTPUT_PRINT_LIST = []
#
# async def asyncAsyncPrintIt(itArgsAndKwargs) :
# global LOG_HELPER_SETTINGS
# while LOG_HELPER_SETTINGS[PRINTING] :
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('------------------------------------------------------------------------ awaiting ------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# print('----------------------------------------------------------------------------------------------------------------------------------------------------------')
# LOG_HELPER_SETTINGS[PRINTING] = True
# print(itArgsAndKwargs[0], **itArgsAndKwargs[1])
#
# async def asyncPrintIt(itArgsAndKwargs) :
# global LOG_HELPER_SETTINGS
# await asyncAsyncPrintIt(itArgsAndKwargs)
# LOG_HELPER_SETTINGS[PRINTING] = False
#
# async def printOutput() :
# global OUTPUT_PRINT_LIST
# while 0 < len(OUTPUT_PRINT_LIST) :
# asyncio.run(asyncPrintIt(OUTPUT_PRINT_LIST.pop(0)))
#
# def logIt(it, **kwargs) :
# global OUTPUT_PRINT_LIST
# shouldPrint = True if 0 == len(OUTPUT_PRINT_LIST) else False
# OUTPUT_PRINT_LIST.append([it, kwargs])
# if shouldPrint :
# printOutput()
# import logging
# LOGGER_INSTANCE = None
# def loadLogger(logger) :
# return logger if ObjectHelper.isNotNone(logger) else logging.getLogger(__name__)
def logIt(it, **kwargs) :
# logging.error(it, **kwargs)
# logging.log(msg=args[0], level=9)
# logger = loadLogger(LOGGER_INSTANCE)
# logger.setLevel(logging.DEBUG)
# logger.info(it)
print(it, **kwargs)
def loadSettings() :
global LOG_HELPER_SETTINGS
# logger = loadLogger(LOGGER_INSTANCE)
# logger.setLevel(logging.DEBUG)
###- logging.basicConfig(filename='example.log', encoding='utf-8', level=logging.DEBUG)
colorama.deinit()
settings = {}
settings[SettingHelper.ACTIVE_ENVIRONMENT] = SettingHelper.getActiveEnvironment()
for level in LogHelperHelper.LEVEL_DICTIONARY :
status = EnvironmentHelper.get(level)
        settings[level] = status if status is not None else c.TRUE
LOG_HELPER_SETTINGS = settings
# if PRINTING not in LOG_HELPER_SETTINGS :
# LOG_HELPER_SETTINGS[PRINTING] = False
if SettingHelper.activeEnvironmentIsLocal() :
colorama.init()
# logging.basicConfig(level=logging.DEBUG)
logIt(RESET_ALL_COLORS, end=c.NOTHING)
loadSettings()
def log(origin, message, level=LOG, exception=None, muteStackTrace=False, newLine=False) :
    LogHelperHelper.softLog(origin, message, level, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def info(origin, message, level=INFO, exception=None, muteStackTrace=False, newLine=False) :
    LogHelperHelper.softLog(origin, message, level, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def success(origin, message, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, SUCCESS, muteStackTrace=muteStackTrace, newLine=newLine)
def setting(origin, message, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, SETTING, muteStackTrace=muteStackTrace, newLine=newLine)
def debug(origin, message, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, DEBUG, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def warning(origin, message, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, WARNING, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def wraper(origin, message, exception, muteStackTrace=False, newLine=False) :
LogHelperHelper.hardLog(origin, message, exception, WRAPPER, muteStackTrace=muteStackTrace, newLine=newLine)
def failure(origin, message, exception, muteStackTrace=False, newLine=False) :
LogHelperHelper.hardLog(origin, message, exception, FAILURE, muteStackTrace=muteStackTrace, newLine=newLine)
def error(origin, message, exception, muteStackTrace=False, newLine=False) :
LogHelperHelper.hardLog(origin, message, exception, ERROR, muteStackTrace=muteStackTrace, newLine=newLine)
def test(origin, message, exception=None, muteStackTrace=False, newLine=False) :
LogHelperHelper.softLog(origin, message, TEST, muteStackTrace=muteStackTrace, newLine=newLine, exception=exception)
def printLog(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(LOG, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printInfo(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(INFO, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printSuccess(message, condition=False, muteStackTrace=False, newLine=True, margin=True) :
LogHelperHelper.printMessageLog(SUCCESS, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin)
def printSetting(message, condition=False, muteStackTrace=False, newLine=True, margin=True) :
LogHelperHelper.printMessageLog(SETTING, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin)
def printDebug(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(DEBUG, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printWarning(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(WARNING, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printWarper(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(WRAPPER, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printFailure(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(FAILURE, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printError(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(ERROR, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
def printTest(message, condition=False, muteStackTrace=False, newLine=True, margin=True, exception=None) :
LogHelperHelper.printMessageLog(TEST, message, condition=condition, muteStackTrace=muteStackTrace, newLine=newLine, margin=margin, exception=exception)
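# Illustrative usage sketch of the level functions above (not part of the original
# module; the origin and messages are made up):
#   log('MyService', 'service started')
#   debug('MyService', 'payload received')
#   try:
#       1 / 0
#   except Exception as exception:
#       error('MyService', 'division failed', exception)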
def prettyPython(
origin,
message,
dictionaryInstance,
quote = c.SINGLE_QUOTE,
tabCount = 0,
nullValue = c.NONE,
trueValue = c.TRUE,
falseValue = c.FALSE,
logLevel = LOG,
condition = True
) :
if condition :
stdout, stderr = EnvironmentHelper.getCurrentSoutStatus()
prettyPythonValue = StringHelper.prettyPython(
dictionaryInstance,
quote = quote,
tabCount = tabCount,
nullValue = nullValue,
trueValue = trueValue,
falseValue = falseValue,
withColors = SettingHelper.activeEnvironmentIsLocal(),
joinAtReturn = False
)
LogHelperHelper.softLog(origin, StringHelper.join([message, c.COLON_SPACE, *prettyPythonValue]), logLevel)
EnvironmentHelper.overrideSoutStatus(stdout, stderr)
def prettyJson(
origin,
message,
dictionaryInstance,
quote = c.DOUBLE_QUOTE,
tabCount = 0,
nullValue = c.NULL_VALUE,
trueValue = c.TRUE_VALUE,
falseValue = c.FALSE_VALUE,
logLevel = LOG,
condition = True
) :
if condition :
stdout, stderr = EnvironmentHelper.getCurrentSoutStatus()
prettyJsonValue = StringHelper.prettyJson(
dictionaryInstance,
quote = quote,
tabCount = tabCount,
nullValue = nullValue,
trueValue = trueValue,
falseValue = falseValue,
withColors = SettingHelper.activeEnvironmentIsLocal(),
joinAtReturn = False
)
LogHelperHelper.softLog(origin, StringHelper.join([message, c.COLON_SPACE, *prettyJsonValue]), logLevel)
EnvironmentHelper.overrideSoutStatus(stdout, stderr)
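# Illustrative sketch of the pretty printers above (sample dictionary is made up):
#   prettyJson('MyService', 'current settings', {'retries': 3, 'active': True})
#   prettyPython('MyService', 'current settings', {'retries': 3, 'active': True})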
def getExceptionMessage(exception) :
if ObjectHelper.isEmpty(exception) :
return c.UNKNOWN
exceptionAsString = str(exception)
if c.NOTHING == exceptionAsString :
return ReflectionHelper.getName(exception.__class__)
else :
return exceptionAsString
def getTracebackMessage(muteStackTrace) :
tracebackMessage = c.BLANK
try :
tracebackMessage = traceback.format_exc()
except :
tracebackMessage = f'{c.NEW_LINE}'
if muteStackTrace :
return StringHelper.join(tracebackMessage.split(c.NEW_LINE)[-2:], character=c.NEW_LINE)
return LogHelperHelper.NO_TRACEBACK_PRESENT_MESSAGE if LogHelperHelper.NO_TRACEBACK_PRESENT == str(tracebackMessage) else tracebackMessage
| 1.875 | 2 |
EDX_main.py | tkcroat/EDX | 0 | 12791381 | # -*- coding: utf-8 -*-
"""
Spyder Editor
SEM_batch_conversion script
Extracts important header info into a parameter log: reads the pertinent header
information from all emsa/psmsa files within a folder and writes it to a single
log file for import into Excel or elsewhere.
No need to convert psmsa into csv ... just always strip the header when opening.
"""
#%% Load modules
import glob, sys, os # already run with functions
import pandas as pd
import numpy as np
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX')
import EDX_import_functions as EDXimport
import EDX_quant_functions as EDXquant
import EDX_plot_functions as EDXplot
import EDX_refit_tk_gui as EDXrf
import EDX_quantplotter_tk_gui as EDXqpl
#%%
# datapath = filedialog.askdirectory(initialdir="H:\\Research_data", title="choose data directory")
filelist=glob.glob('*.psmsa')+glob.glob('*.emsa') # psmsa option
#%% Main file processing loop for emsa or psmsa parameter extraction
# Create parameters log for all SEM-EDX files (autosaved with prior backup) using parameter template
# Checks for existing EDXlogbook correlating filenames w/ sample
EDXlog= EDXimport.getparams(filelist)
EDXlog= EDXimport.getparams(filelist, reprocess=True) # alt version that reacquires params from existing files
EDXlog.to_csv('EDXparamlog.csv',index=False)
# Creation of jpg images with points/areas superimposed (from .psref and .p_s files).. jpgs directly saved
# returns df with spatial areas (automatically saved w/ backup)
SpatialAreasLog=EDXimport.processpointshoot()
#%%
# Combine files with same basename/point name (autosaves altered EDXlog with backup)
EDXlog=EDXimport.combineEDX(EDXlog)
#%% Automated background fitting of SEM-EDX spectra
# can drop or exclude files here if desired (filter of EDXlog)
# Various ways of slicing up above full parameters log list
EDXfiles=EDXlog
EDXfiles=EDXfiles[0:10][:] # grab first ten rows
EDXfiles=EDXfiles.iloc[[0]] # select single row
EDXfiles=EDXfiles[EDXfiles['Filenumber'].str.contains("\+",na=False)] # choose only summed files
EDXfiles=EDXfiles[~EDXfiles['Comments'].str.contains("exclude",na=False, case=False)] # drop files flagged "exclude" in comments
EDXfiles=EDXfiles[EDXfiles['Timeconst']>12500] # backfits fail with small timeconst
#%% Reload of existing files (if reprocessing data) from working directory
EDXlog, Backfitlog, Integlog, Peakfitlog, EDXquantparams, Interferences=EDXimport.loadprocessfiles()
#%%
Elements=EDXimport.pickelemsGUI(EDXquantparams) # interactive element selection
Elements=['S','C','Ca','O','Cr', 'FeL','Fe','Mg','Al','Si'] # meteorites
Elements=['S','C','Ca','O','Cr', 'FeL','Fe','Mg','Al','Si'] # pristine SiC
Elements=['S','C','Ca','O','Cr', 'FeL','Fe','Mg','Al','Si','PtM','PtL','PtL2','Ga','GaL'] # meteorites +FIB artifact
Elements=['N','C','O','FeL','Fe','S','Ca','Mg','Al','Si','Ti'] # refractory analogs
Elements=np.ndarray.tolist(Integlog.Element.unique())# gets prior used element set
Elements.append('PtL2')
Elements.extend(['GaL','PtM', 'Ga','PtL','PtL2'])
# Load energy ranges without peaks for background fitting (various options and can also create custom version)
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEM_backfit_regions.csv', encoding='utf-8')
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEM_backfit_regions_alt.csv', encoding='utf-8')
# Version for pristine grains on graphene
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEM_backfit_regions_pristine.csv', encoding='utf-8')
# TEM version
Fitregionsdf=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\TEM_backfit_regions.csv', encoding='utf-8')
Fitregionsdf=pd.read_csv('SEM_backfit_regions_alt.csv', encoding='utf-8') # local version
# If any modifications were made during quant of this data, load local version stored with data
EDXquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\SEMquantparams.csv', encoding='utf-8')
EDXquantparams=pd.read_csv('C:\\Users\\tkc\\Documents\\Python_Scripts\\EDX\\TEMquantparams.csv', encoding='utf-8')
#%%
# Run main quant loop (not autosaved so use to_csv save below after checks)
kwargs={}
Backfitlog, Peakfitlog, Integlog= EDXimport.batchEDXquant(EDXlog, Fitregionsdf, EDXquantparams, Elements, Backfitlog, Integlog, Peakfitlog, **kwargs)
# optional kwargs for above command
kwargs.update({'redo_backfit':True}) # default false for redo, redo of integration but not of background fits; no effect on new spectra
kwargs.update({'redo_integration':False}) # defaults True (False allows skipping existing integrations and gauss peak fits)
# if quant rerun w/o changing backfits (i.e. after custom mods) skip clear of backfits
kwargs.update({'clear_old_backfits':True}) # default false option to not overwrite all backgrounds in csv files (defaults True)
kwargs.update({'savegauss':False}) # optional save of gaussian fit column into spectrum's csv file; default true
# Find/ Replace subset of files (processed in alternate manner) from above log files.. refit of failed fits
Backfitlog.to_csv('Backfitparamslog.csv', index=False)
Peakfitlog.to_csv('Peakfitlog.csv', index=False)
Integlog.to_csv('Integquantlog.csv', index=False)
# After successful refit of subset of files, find/replace entries in original logbooks (saves after finishing)
Backfitlog, Peakfitlog, Integlog = EDXimport.replacelogentries(EDXlog, Backfitlog, Peakfitlog, Integlog)
#%% Run interactive EDXrefitter (if any plots, backfit points, etc. are bad)
EDXrf.launch_refitter()
EDXqpl.launch_plotter(os.getcwd())
# Redo integlog, peakfits if any backfits were changed (first reload saved changes from file)
EDXlog, Backfitlog, Integlog, Peakfitlog, EDXquantparams, Interferences=EDXimport.loadprocessfiles()
kwargs={'newback':False,'overwrite':False} # do not refit or overwrite backgrounds... use ones made with interactive refitter
Backfitlog, Peakfitlog, Integlog= EDXimport.batchEDXquant(EDXlog, Fitregionsdf, EDXquantparams, Elements, **kwargs)
# Manual save of peakfitlog and integlog are needed
Peakfitlog.to_csv('Peakfitlog.csv', index=False)
Integlog.to_csv('Integquantlog.csv', index=False)
#%% PLOTTING to check quality of background fits, peaks, etc.
EDXfiles=EDXlog[0:5] # Selecting subsets of all SEM files
# Plot counts and background over specified energy range
pkwargs={}
pkwargs.update({'xrange':'0.3-10'}) # optional x range for plot (default is 0-10? )
pkwargs.update({'backfitdf':Backfitlog}) # optional plotting of points used to create background fit
pkwargs.update({'backfitpts':False}) # skip background pts but include fits
pkwargs.update({'yrange':[-500,3000]}) # optional y range for plot.. defaults to data range
pkwargs.update({'plotelems':['O','Mg','S','Si', 'Ca', 'Fe', 'FeL']}) # list of elements to label on plots
pkwargs.update({'plotelems':['O','Mg','Si', 'Fe']})
pkwargs.update({'PDFname':'counts_report_9Jan18.pdf'}) # alt save name (defaults to countsback_report.pdf)
pkwargs.update({'savgol':True}) # include savgol differentiated plot (default False)
EDXplot.reportcounts(EDXfiles, EDXquantparams, **pkwargs)
EDXplot.reportcounts(EDXlog, EDXquantparams, **pkwargs)
# plot report with subtracted counts and optionally gaussian peak fits (if they exist)
EDXplot.reportSEMpeaks(EDXfiles, plotelems, SEMquantparams, addgauss=True, PDFname='peak_report.pdf')
# TODO Place center of integration on plot for significant peaks
# plot subtracted data around major elements including corrected counts
EDXplot.reportsubdatamajor(EDXfiles, Integquantlog, PDFname='Subcounts_major_report.pdf')
reportcountspeakfits(EDXfiles, Fitregionsdf, plotrange, plotelems, SEMquantparams)
# Now proceed to EDX_quant_main for interference adjustments, composition calcs, etc.
# Renaming of troublesome p_s and psmsa files (i.e. containing blanks)
psfiles=glob.glob('*.p_s')
badpsfiles=[i for i in psfiles if '\xa0' in i]
for i, psfile in enumerate(badpsfiles):
EDXimport.renamePSset(psfile, '\xa0', '_')
train=pd.read_csv('Backfit_training.csv')
| 2.34375 | 2 |
Code/AnalyseFittedData_Field_at_He.py | nancy-aggarwal/Characterization-of-ARIADNE-source-mass-rotor_Github | 0 | 12791382 |
# %%
from scipy.io import loadmat
import numpy as np
# from scipy.optimize import minimize
from datetime import datetime
now = datetime.now
import matplotlib.pyplot as plt
import matplotlib as mpl
import time
import os
import pickle
import json
from scipy import fft
from scipy.signal.windows import hann
from copy import deepcopy
from scipy.stats import chi2
# %%
SaveFitFigs = True
# SaveFitData = True
dpiN = 1000
dark_plots = True
n_sig = 8
n_print_sigfigs = 3
if dark_plots:
dark='darkbg/'
q = mpl.rc_params_from_file('matplotlibrc_dark')
else:
dark = 'whitebg/'
mpl.rcParams.update(mpl.rcParamsDefault)
SavePlotDir_Exp2 = '../Results/2021-12-21_threesigfigs/Exp2/'+dark+'FittingFigs/'
# SaveDataDir_Exp2 = '../Results/2021-11-16/Exp2/'+'Pickles/'
LoadDataDir_Exp2 = '../Results/2021-12-20/Exp2/Pickles/'#SaveDataDir_Exp2 # The other notebook stored the pickle in the same folder
if SaveFitFigs:
if not os.path.exists(SavePlotDir_Exp2):
os.makedirs(SavePlotDir_Exp2)
# if SaveFitData:
# if not os.path.exists(SaveDataDir_Exp2):
# os.makedirs(SaveDataDir_Exp2)
# %%
if dark_plots:
mpl.rcParams.update(q)
# %matplotlib inline
mpl.rcParams.update({
#'legend.borderpad': 0.3,
#'legend.borderaxespad': 0.25,
# 'legend.columnspacing': 0.6,
# 'legend.handlelength': 0.7,
#'legend.handleheight': 0.4,
#'legend.handletextpad': 0.2,
# 'legend.labelspacing': 0.45,
# 'text.usetex': True,
'font.size':13,
})
else:
# %matplotlib inline
# mpl.rcParams.update(mpl.rcParamsDefault)
font = {
# 'weight' : 'normal',
'size' : 15,
'family': 'Times New Roman'}
plt.rc('font', **font)
# mpl.rcParams.update({'font.family':'serif'})
# %%
# %load_ext autoreload
# %%
from B_calc_script import FieldAtAnyLocation
from B_calc_script import signif
# %%
# %autoreload 2
# %%
# %% [markdown]
# # Load data
# %%
Exp2_data_filename = LoadDataDir_Exp2+'Exp2_cut_averaged_data.pk'
# %%
with open(Exp2_data_filename,'rb') as file_obj:
Exp2_data_cut = pickle.load(file_obj)
# %% [markdown]
# ## Load parameters ##
# %%
nu = 5
# %%
with open('../Params/Exp2_dimensions_and_locations.json', 'r') as fp:
params_dims_locs = json.load(fp)
# %%
params_dims_locs
# %%
rtr_dims = params_dims_locs['rotor_dims']
for key in rtr_dims:
rtr_dims[key] = signif(rtr_dims[key],n_sig)
# %%
He_sample_locations =deepcopy(params_dims_locs['3He locations'])
for Hekey in He_sample_locations.keys():
string_to_parse = params_dims_locs['3He locations'][Hekey]['location']
He_sample_locations[Hekey]['location']=eval(string_to_parse.replace('rotor_dims','rtr_dims').replace('D_wheel_sample','params_dims_locs[\'D_wheel_sample\']'))
# %%
# with open('../Params/'+'FittedDipoles_{}Hz_'.format(nu)+'3sources.pk','rb') as filehandle:
# Exp2_Opt_Params_3_sources = pickle.load(filehandle)
# Exp2_Opt_Params_3_sources=Exp2_Opt_Params_3_sources.tolist()
with open('../Params/'+'FittedDipoles_{}Hz_'.format(nu)+'3sources.json','r',encoding = 'utf-8') as filehandle:
Exp2_Opt_Params_3_sources = json.loads(filehandle.read())
# %%
Exp2_Opt_Params_3_sources
# %%
Exp2_Opt_Params_3_sources_noDC_noBar = Exp2_Opt_Params_3_sources[:-5]
Exp2_Opt_Params_3_sources_zeroDC = Exp2_Opt_Params_3_sources_noDC_noBar+ [0,0,0] + [Exp2_Opt_Params_3_sources[-1]]
# %%
# with open('../Params/Params_4sources.pk','rb') as filehandle:
# Exp2_Opt_Params_4_sources = pickle.load(filehandle)
# %% [markdown]
# # Calculate field at sample location #
# %%
Sample_settings = {
'rotor dimensions':rtr_dims,
'sensor locations':He_sample_locations,
# 'bar location':0,
# 'DC shifts':[DC_shift_AVx,DC_shift_AVy,DC_shift_AWy,DC_shift_AWz]
# 'deltaB':1 #picoTesla
}
# %%
Data_At_Sample = {
'theta':np.concatenate([Exp2_data_cut['theta avg'][nu]
# ,360+Exp2_data_cut['theta avg'][nu]
]),
#theta positive for ac, negative for clockwise
'B':{
'3He 1':{
'Z':np.array([]),
'Y':np.array([]),
'X':np.array([])
},
}
}
# %%
# nowtext = now().strftime("%Y%m%d%H%M")
nowtext = '_15font'
fitplotfilename = SavePlotDir_Exp2+'FittedData_at_sample_{}Hz'.format(nu)+nowtext+'.png'
# fitdatafilename = SaveDataDir_Exp2+'FittedData_at_sample_{}Hz'.format(nu)+nowtext+'.pk'
Exp2_optimization_settings = {
'print':True,
'number of sources':3,
'location dimensions':3,
'moment dimensions':3,
'location coordinate system':'polar',
'moment coordinate system':'polar',
# 'chi tolerance':10,
'optimize DC shifts':True,
'optimize bar location':True,
'significant figures':n_sig
}
Exp2_plot_settings = {
'plot':True,
# 'memo':'{} Hz (AV X&Y inverted)'.format(nu),
# 'memo':'{} Hz'.format(nu),
'doubleplot':False,
'saveplot':SaveFitFigs,
'dpi':dpiN,
'figname':fitplotfilename,
'print sigfigs':n_print_sigfigs
}
Exp2_save_settings ={
'save fit data':False,
# 'fit data filename':fitdatafilename
}
Exp2_all_settings = {
'experiment settings':Sample_settings,
'data':Data_At_Sample,
'optimization settings':Exp2_optimization_settings,
'plot settings':Exp2_plot_settings,
'save settings':Exp2_save_settings
}
Exp2_Opt_Params = Exp2_Opt_Params_3_sources_zeroDC
field_at_sample = FieldAtAnyLocation(Exp2_Opt_Params,Exp2_all_settings)
# %% [markdown]
# # FFT Field at Sample Location
# %%
n_reps = 50
# %%
Field_At_He_location_for_FFT = {}
Field_At_He_location_for_FFT['time'] = Data_At_Sample['theta']/360/nu
Field_At_He_location_for_FFT['B time domain'] = {}
Field_At_He_location_for_FFT['B freq domain'] = {}
numsamples = n_reps*Field_At_He_location_for_FFT['time'].size
binsize = Field_At_He_location_for_FFT['time'][2] - Field_At_He_location_for_FFT['time'][1]
Field_At_He_location_for_FFT['freq']= fft.rfftfreq(n = numsamples,d=binsize)
# %%
for Hekey in field_at_sample.keys():
Field_At_He_location_for_FFT['B time domain'][Hekey] = {}
Field_At_He_location_for_FFT['B freq domain'][Hekey] = {}
for axiskey in field_at_sample[Hekey].keys():
Field_At_He_location_for_FFT['B time domain'][Hekey][axiskey] = np.tile(field_at_sample[Hekey][axiskey],n_reps)
Field_At_He_location_for_FFT['B freq domain'][Hekey][axiskey] = 4*fft.rfft(Field_At_He_location_for_FFT['B time domain'][Hekey][axiskey]*hann(numsamples),norm = "forward")
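        # Note (reader's interpretation, not stated in the original code): the factor
        # of 4 plausibly compensates for the Hann window's coherent gain (~0.5) and
        # for the one-sided rfft with norm="forward" carrying only half the amplitude.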
# %%
indnu = (np.abs(Field_At_He_location_for_FFT['freq']-nu)<0.5*nu)
ind11nu = (np.abs(Field_At_He_location_for_FFT['freq']-11*nu)<0.5*nu)
FFT_amp_table = {}
# FFT_amp_table['frequency'] .append(nu)
FFT_amp_table[nu] = {}
FFT_amp_table[11*nu] = {}
B_max_table = {}
for Hekey in Field_At_He_location_for_FFT['B freq domain'].keys():
FFT_amp_table[nu][Hekey] = {}
FFT_amp_table[11*nu][Hekey] = {}
B_max_table[Hekey] = {}
for axiskey in Field_At_He_location_for_FFT['B freq domain'][Hekey].keys():
FFT_amp_table[nu][Hekey][axiskey] = np.abs(Field_At_He_location_for_FFT['B freq domain'][Hekey][axiskey][indnu]).max()
FFT_amp_table[11*nu][Hekey][axiskey] = np.abs(Field_At_He_location_for_FFT['B freq domain'][Hekey][axiskey][ind11nu]).max()
B_max_table[Hekey][axiskey] = np.abs(Field_At_He_location_for_FFT['B freq domain'][Hekey][axiskey]).max()
# %%
print("FFT Amplitudes calculated at locations inside He spheroid")
for freq in FFT_amp_table.keys():
print('{} Hz'.format(freq), end = "\n")
print("-------------------")
print("Axis |", end = " ")
for Hekey in FFT_amp_table[freq].keys():
print('Sensor {} |'.format(Hekey), end = " ")
print("\n")
for axiskey in FFT_amp_table[freq][Hekey].keys():
print(" "+axiskey+" |",end="")
for Hekey in FFT_amp_table[freq].keys():
print(" {:0.1f} |".format(FFT_amp_table[freq][Hekey][axiskey]),end="")
print("\n")
print("-------------------")
# %%
for Hekey in Field_At_He_location_for_FFT['B freq domain'].keys():
plt.figure()
i_num = 0
B_max = 0
max_at_nu = 0
max_at_11nu = 0
for axiskey in Field_At_He_location_for_FFT['B freq domain'][Hekey].keys():
plt.semilogx(Field_At_He_location_for_FFT['freq']
,np.abs(Field_At_He_location_for_FFT['B freq domain'][Hekey][axiskey])
,label = axiskey
,alpha = 1-i_num/3)
i_num +=1
B_max = max(B_max, B_max_table[Hekey][axiskey])
max_at_nu = max(max_at_nu, FFT_amp_table[nu][Hekey][axiskey])
max_at_11nu = max(max_at_11nu, FFT_amp_table[11*nu][Hekey][axiskey])
plt.annotate('$f_\mathrm{rot}$',xy = (nu,max_at_nu),xytext=(nu,B_max*1.4),\
arrowprops=dict(color='red',alpha=0.5,width = 1.5,headwidth=6, shrink=0.),\
horizontalalignment='center')
plt.annotate('$11f_\mathrm{rot}$',xy = (11*nu,max_at_11nu),xytext=(11*nu,B_max*1.4),\
arrowprops=dict(color='fuchsia',alpha=0.5,width = 1.5,headwidth=6,shrink=0.),\
horizontalalignment='center')
plt.ylim(0,B_max*1.5)
plt.xlabel('Frequency (Hz)')
plt.title('Contribution of impurities to field at $^3$He location {}\n ({:0.1f} s measurement duration)'.format(Hekey,n_reps*Field_At_He_location_for_FFT['time'][-1]))
plt.ylabel('Magnetic field (pT)')
plt.grid()
plt.legend(loc = 'upper left')
if SaveFitFigs:
plt.savefig(SavePlotDir_Exp2+'BFFT_at_sample_{}.png'.format(Hekey),bbox_inches = 'tight',dpi = dpiN)
# %%
for axiskey in Field_At_He_location_for_FFT['B freq domain']['1'].keys():
plt.figure()
i_num = 0
B_max = 0
max_at_nu = 0
max_at_11nu = 0
for Hekey in Field_At_He_location_for_FFT['B freq domain'].keys():
plt.semilogx(Field_At_He_location_for_FFT['freq']
,np.abs(Field_At_He_location_for_FFT['B freq domain'][Hekey][axiskey])
,label = Hekey
,alpha = 1-i_num/4)
i_num +=1
B_max = max(B_max, B_max_table[Hekey][axiskey])
max_at_nu = max(max_at_nu, FFT_amp_table[nu][Hekey][axiskey])
max_at_11nu = max(max_at_11nu, FFT_amp_table[11*nu][Hekey][axiskey])
plt.annotate('$f_\mathrm{rot}$',xy = (nu,max_at_nu),xytext=(nu,B_max*1.4),\
arrowprops=dict(color='red',alpha=0.5,width = 1.5,headwidth=6, shrink=0.),\
horizontalalignment='center')
plt.annotate('$11f_\mathrm{rot}$',xy = (11*nu,max_at_11nu),xytext=(11*nu,B_max*1.4),\
arrowprops=dict(color='fuchsia',alpha=0.5,width = 1.5,headwidth=6,shrink=0.),\
horizontalalignment='center')
plt.ylim(0,B_max*1.5)
plt.xlabel('Frequency (Hz)')
plt.title('Contribution of impurities to field at $^3$He location \n ({:0.1f} s measurement duration)'.format(n_reps*Field_At_He_location_for_FFT['time'][-1]))
plt.ylabel('Magnetic field, {} component (pT)'.format(axiskey))
plt.grid()
plt.legend(loc = 'upper left')
if SaveFitFigs:
plt.savefig(SavePlotDir_Exp2+'BFFT_at_sample_{}_component.png'.format(axiskey),bbox_inches = 'tight',dpi = dpiN)
# %%
# %%
| 1.90625 | 2 |
specification/tools/VMHprocessMappings1.py | iptc/video-metadata-hub | 1 | 12791383 | #!/usr/bin/env python3
"""
Python script for retrieving IPTC Video Metadata Hub mapping data from a Google sheet
The retrieved data are transformed in HTML as saved as HTML page.
For IPTC-internal use
Creator: <NAME>
History:
2016-11-25 mws: project started, download and HTML output ok
2020-06-15 BQ: Updated and checked into GitHub
"""
from __future__ import print_function
import pickle
import os
import sys
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from lxml import etree as ET
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
CLIENT_SECRET_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'client_secret.json')
APPLICATION_NAME = 'Video Metadata Hub Documentation Generator'
# Constant values
StdVersion = "1.3"
HeaderAppendix = "" # could be " - D-R-A-F-T - "
IPTCApprovalDate = "13 May 2020"
IPTCRevisionDate = "13 May 2020"
CopyrightYear = "2020"
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CLIENT_SECRET_FILE, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
def createSpecificMapping(valuesProp, headingtext1, headingtext2, findmoreaturl, mapIdx, filename):
# create the HTML document
xroot = ET.Element('html')
head = ET.SubElement(xroot, 'head')
title = ET.SubElement(head, 'title')
title.text = 'Video Metadata Hub Mapping'
metachset = ET.SubElement(head, 'meta', {'http-equiv': "Content-Type", 'content': "text/html; charset=utf-8"})
csslink1 = ET.SubElement(head, 'link', {'type': 'text/css', 'rel': 'stylesheet', 'href': 'iptcspecs1.css'})
body = ET.SubElement(xroot, 'body')
pageheader = ET.SubElement(body, 'h1', {'class':'pageheader'})
iptcanc = ET.SubElement(pageheader, 'a', {'href':'https://iptc.org'})
iptcimg = ET.SubElement(iptcanc, 'img', {'src':'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg', 'align':'left', 'border':'0'})
pageheader.text = headingtext1
seeotherdoc1 = ET.SubElement(body, 'p', {'class':'note1'})
seeotherdoc1.text = 'Return to '
seeotherdoc1link1 = ET.SubElement(seeotherdoc1, 'a', {'href':'IPTC-VideoMetadataHub-mapping-Rec_'+StdVersion+'.html'})
seeotherdoc1link1.text = 'all recommended mappings of the Video Metadata Hub.'
seeotherdoc2 = ET.SubElement(body, 'p', {'class':'note1'})
seeotherdoc2.text = 'See the '
seeotherdoc1link2 = ET.SubElement(seeotherdoc2, 'a', {'href':'IPTC-VideoMetadataHub-props-Rec_'+StdVersion+'.html'})
seeotherdoc1link2.text = 'specification of Video Metadata Hub properties'
docdate = ET.SubElement(body, 'p', {'class':'note1'})
docdate.text = 'Mapping recommended on ' + IPTCApprovalDate + '. Document revision as of ' + IPTCRevisionDate + '.'
copyrightnotice = ET.fromstring('<p class="smallnote1">Copyright © ' + CopyrightYear + ', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>')
body.append(copyrightnotice)
mappedstdnote = ET.SubElement(body, 'p', {'class':'note1'})
mappedstdnote.text = 'In this table the columns with a blue header are defined by the Video Metadata Hub, the column with the green header is defined by ' + headingtext2
propnote1 = ET.fromstring('<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>')
body.append(propnote1)
if not valuesProp:
print('No Property data found.')
else:
table = ET.SubElement(body, 'table', {'class':'spec1 vmhmapping'})
thead = ET.SubElement(table, 'thead')
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'th', {'class':'hdrcol1'})
thcol1.text = 'Property Group'
thcol2 = ET.SubElement(throw, 'th', {'class':'hdrcol2'})
thcol2.text = 'Property Name'
thcol3 = ET.SubElement(throw, 'th', {'class':'hdrcol3'})
thcol3.text = 'Definition / Semantics'
"""
thcol4 = ET.SubElement(throw, 'th', {'class':'hdrcol4'})
thcol4.text = 'Basic Type/Cardinality'
"""
thcol5 = ET.SubElement(throw, 'th', {'class':'hdrcol5'})
thcol5.text = 'EBUcore'
thcol6 = ET.SubElement(throw, 'th', {'class':'hdrcol6'})
thcol6.text = 'XMP'
thcol7 = ET.SubElement(throw, 'th', {'class':'hdrcol7'})
thcol7.text = 'PVMD JSON'
thcol8 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol8.text = headingtext2
# second row with "find more at ..." links
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'td', {'class':'hdrcol1'})
thcol1.text = ' '
thcol2 = ET.SubElement(throw, 'td', {'class':'hdrcol2'})
thcol2.text = ' '
thcol3 = ET.SubElement(throw, 'td', {'class':'hdrcol3'})
thcol3.text = ' '
"""
thcol4 = ET.SubElement(throw, 'td', {'class':'hdrcol4'})
thcol4.text = ''
"""
moreatlink = valuesProp[0][4]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][5]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][6]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][mapIdx]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
tbody = ET.SubElement(table, 'tbody')
for rowcounter in range(2, 186):
xrow = ET.SubElement(tbody, 'tr')
teststr = valuesProp[rowcounter][0]
if teststr == 'Property Structures (PS)':
xrow.set('style', 'background-color: #009999;')
if teststr.find('PS', 0) == 0:
xrow.set('style', 'background-color: #00cccc;')
xcell1 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][0]
except:
valstr = ' '
xcell1.text = valstr
xcell2 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][1]
except:
valstr = ' '
xcell2.text = valstr
xcell3 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][2]
except:
valstr = ' '
xcell3.text = valstr
"""
xcell4 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][3]
except:
valstr = ' '
xcell4.text = valstr
"""
xcell5 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][4]
except:
valstr = ' '
xcell5.text = valstr
xcell6 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][5]
except:
valstr = ' '
xcell6.text = valstr
xcell7 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][6]
except:
valstr = ' '
xcell7.text = valstr
xcell8 = ET.SubElement(xrow, 'td', { 'class': 'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][mapIdx]
except:
valstr = ' '
xcell8.text = valstr
with open(filename, 'w') as file:
file.write(ET.tostring(xroot, pretty_print=True).decode())
def main():
credentials = get_credentials()
service = build('sheets', 'v4', credentials=credentials)
spreadsheetId = '1TgfvHcsbGvJqmF0iUUnaL-RAdd1lbentmb2LhcM8SDk'
rangeName = 'MappingRec 1.3.1!A4:R'
result1 = service.spreadsheets().values().get(
spreadsheetId=spreadsheetId, range=rangeName).execute()
valuesProp = result1.get('values', [])
# create the HTML document
xroot = ET.Element('html')
head = ET.SubElement(xroot, 'head')
title = ET.SubElement(head, 'title')
title.text = 'Video Metadata Hub Mapping'
metachset = ET.SubElement(head, 'meta', {'http-equiv': "Content-Type", 'content': "text/html; charset=utf-8"})
csslink1 = ET.SubElement(head, 'link', {'type': 'text/css', 'rel': 'stylesheet', 'href': 'iptcspecs1.css'})
body = ET.SubElement(xroot, 'body')
pageheader = ET.SubElement(body, 'h1', {'class':'pageheader'})
iptcanc = ET.SubElement(pageheader, 'a', {'href':'https://iptc.org'})
iptcimg = ET.SubElement(iptcanc, 'img', {'src':'https://iptc.org/download/resources/logos/iptc-gr_70x70.jpg', 'align':'left', 'border':'0'})
pageheader.text = 'IPTC Video Metadata Hub - Recommendation '+ StdVersion +' / all Mappings' + HeaderAppendix
seeotherdoc1 = ET.SubElement(body, 'p', {'class':'note1'})
seeotherdoc1.text = 'See the '
seeotherdoc1link1 = ET.SubElement(seeotherdoc1, 'a', {'href':'IPTC-VideoMetadataHub-props-Rec_'+StdVersion+'.html'})
seeotherdoc1link1.text = 'specification of Video Metadata Hub properties'
docdate = ET.SubElement(body, 'p', {'class':'note1'})
docdate.text = 'Mapping recommended on ' + IPTCApprovalDate + '. Document revision as of ' + IPTCRevisionDate + '.'
copyrightnotice = ET.fromstring('<p class="smallnote1">Copyright © '+ CopyrightYear + ', <a href="https://iptc.org">IPTC</a> - all rights reserved. Published under the Creative Commons Attribution 4.0 license <a href="http://creativecommons.org/licenses/by/4.0/">http://creativecommons.org/licenses/by/4.0/</a></p>')
body.append(copyrightnotice)
mappedstdnote = ET.SubElement(body, 'p', {'class':'note1'})
mappedstdnote.text = 'In this table the columns with a blue header are defined by the Video Metadata Hub, the columns with the green or amber headers are defined by other standards or tools.'
propnote1 = ET.fromstring('<p class="note1">Note on the column headers:<br />EBUcore: based on the EBU Core Metadata Standard.<br />XMP: based on the ISO XMP standard.<br />PVMD: a specification of JSON properties for Photo and Video MetaData by IPTC (aka phovidmd).</p>')
body.append(propnote1)
docnote1 = ET.SubElement(body, 'p', {'class':'smallnote1'})
docnote1.text = 'The header of mappings to other standards provides a link to a table including only this mapping (better for printing)'
if not valuesProp:
print('No Property data found.')
else:
table = ET.SubElement(body, 'table', {'class':'spec1 vmhmapping'})
thead = ET.SubElement(table, 'thead')
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'th', {'class':'hdrcol1'})
thcol1.text = 'Property Group'
thcol2 = ET.SubElement(throw, 'th', {'class':'hdrcol2'})
thcol2.text = 'Property Name'
thcol3 = ET.SubElement(throw, 'th', {'class':'hdrcol3'})
thcol3.text = 'Definition / Semantics'
"""
thcol4 = ET.SubElement(throw, 'th', {'class':'hdrcol4'})
thcol4.text = 'Basic Type/Cardinality'
"""
thcol5 = ET.SubElement(throw, 'th', {'class':'hdrcol5'})
thcol5.text = 'EBUcore'
thcol6 = ET.SubElement(throw, 'th', {'class':'hdrcol6'})
thcol6.text = 'XMP'
thcol7 = ET.SubElement(throw, 'th', {'class':'hdrcol7'})
thcol7.text = 'IPTC PVMD JSON'
thcol8 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol8link = ET.SubElement(thcol8,'a', {'href':'IPTC-VideoMetadataHub-mapping-AppleQT-Rec_'+StdVersion+'.html'})
thcol8link.text = 'Apple Quicktime'
thcol9 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol9link = ET.SubElement(thcol9,'a', {'href':'IPTC-VideoMetadataHub-mapping-MPEG7-Rec_'+StdVersion+'.html'})
thcol9link.text = 'MPEG 7'
thcol10 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol10link = ET.SubElement(thcol10,'a', {'href':'IPTC-VideoMetadataHub-mapping-NewsMLG2-Rec_'+StdVersion+'.html'})
thcol10link.text = 'NewsML-G2'
thcol11 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol11link = ET.SubElement(thcol11,'a', {'href':'IPTC-VideoMetadataHub-mapping-PBCore21-Rec_'+StdVersion+'.html'})
thcol11link.text = 'PB Core 2.1'
thcol12 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol12link = ET.SubElement(thcol12,'a', {'href':'IPTC-VideoMetadataHub-mapping-SchemaOrg-Rec_'+StdVersion+'.html'})
thcol12link.text = 'Schema.org'
# new in 2018-03
thcol13 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol13link = ET.SubElement(thcol13,'a', {'href':'IPTC-VideoMetadataHub-mapping-SonyXDCAM-Rec_'+StdVersion+'.html'})
thcol13link.text = 'Sony XDCAM & Planning'
thcol14 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol14link = ET.SubElement(thcol14,'a', {'href':'IPTC-VideoMetadataHub-mapping-Panasonic-SMPTEP2-Rec_'+StdVersion+'.html'})
thcol14link.text = 'Panasonic/SMPTE P2'
thcol15 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol15link = ET.SubElement(thcol15,'a', {'href':'IPTC-VideoMetadataHub-mapping-CanonVClip-Rec_'+StdVersion+'.html'})
thcol15link.text = 'Canon VideoClip XML'
thcol16 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc'})
thcol16link = ET.SubElement(thcol16,'a', {'href':'IPTC-VideoMetadataHub-mapping-exiftool-Rec_'+StdVersion+'.html'})
thcol16link.text = 'exiftool field ids'
thcol17 = ET.SubElement(throw, 'th', {'class':'hdrcolNoniptc2'})
thcol17link = ET.SubElement(thcol17,'a', {'href':'IPTC-VideoMetadataHub-mapping-EIDR-Rec_'+StdVersion+'.html'})
thcol17link.text = 'EIDR Data Fields 2.0'
# second row with "find more at ..." links
throw = ET.SubElement(thead, 'tr')
thcol1 = ET.SubElement(throw, 'td', {'class':'hdrcol1'})
thcol1.text = ' '
thcol2 = ET.SubElement(throw, 'td', {'class':'hdrcol2'})
thcol2.text = ' '
thcol3 = ET.SubElement(throw, 'td', {'class':'hdrcol3'})
thcol3.text = ' '
"""
thcol4 = ET.SubElement(throw, 'td', {'class':'hdrcol4'})
thcol4.text = ''
"""
moreatlink = valuesProp[0][4]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][5]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][6]
colcode = ET.fromstring(
'<td class="hdrcolIptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
moreatlink = valuesProp[0][7]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][9]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][10]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][11]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][12]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][13]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][14]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][15]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][16]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc"> </td>')
throw.append(colcode)
moreatlink = valuesProp[0][17]
if moreatlink != '':
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"><a href="' + moreatlink + '" target="_blank">Find more about it at ...</a></td>')
throw.append(colcode)
else:
colcode = ET.fromstring(
'<td class="hdrcolNoniptc2"> </td>')
throw.append(colcode)
tbody = ET.SubElement(table, 'tbody')
for rowcounter in range(2, 186):
xrow = ET.SubElement(tbody, 'tr')
teststr = valuesProp[rowcounter][0]
if teststr == 'Property Structures (PS)':
xrow.set('style', 'background-color: #009999;')
if teststr.find('PS', 0) == 0:
xrow.set('style', 'background-color: #00cccc;')
xcell1 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][0]
except:
valstr = ' '
xcell1.text = valstr
xcell2 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][1]
except:
valstr = ' '
xcell2.text = valstr
xcell3 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][2]
except:
valstr = ' '
xcell3.text = valstr
"""
xcell4 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][3]
except:
valstr = ' '
xcell4.text = valstr
"""
xcell5 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][4]
except:
valstr = ' '
xcell5.text = valstr
xcell6 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][5]
except:
valstr = ' '
xcell6.text = valstr
xcell7 = ET.SubElement(xrow, 'td', {'class':'bgdcolIptc'})
try:
valstr = valuesProp[rowcounter][6]
except:
valstr = ' '
xcell7.text = valstr
xcell8 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][7]
except:
valstr = ' '
xcell8.text = valstr
xcell9 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][9]
except:
valstr = ' '
xcell9.text = valstr
xcell10 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][10]
except:
valstr = ' '
xcell10.text = valstr
xcell11 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][11]
except:
valstr = ' '
xcell11.text = valstr
xcell12 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][12]
except:
valstr = ' '
xcell12.text = valstr
xcell13 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][13]
except:
valstr = ' '
xcell13.text = valstr
xcell14 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][14]
except:
valstr = ' '
xcell14.text = valstr
xcell15 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][15]
except:
valstr = ' '
xcell15.text = valstr
xcell16 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc'})
try:
valstr = valuesProp[rowcounter][16]
except:
valstr = ' '
xcell16.text = valstr
xcell17 = ET.SubElement(xrow, 'td', {'class':'bgdcolNoniptc2'})
try:
valstr = valuesProp[rowcounter][17]
except:
valstr = ' '
xcell17.text = valstr
filename = "IPTC-VideoMetadataHub-mapping-Rec_"+StdVersion+".html"
with open(filename, 'w') as file:
file.write(ET.tostring(xroot, pretty_print=True).decode())
moreatlink = valuesProp[0][7]
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Apple Quicktime', 'Apple Quicktime', moreatlink, 7, 'IPTC-VideoMetadataHub-mapping-AppleQT-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - MPEG 7', 'MPEG 7', moreatlink, 9,'IPTC-VideoMetadataHub-mapping-MPEG7-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - NewsML-G2', 'NewsML-G2', moreatlink, 10,'IPTC-VideoMetadataHub-mapping-NewsMLG2-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - PB Core 2.1', 'PB Core 2.1', moreatlink, 11,'IPTC-VideoMetadataHub-mapping-PBCore21-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Schema.org', 'Schema.org', moreatlink, 12,'IPTC-VideoMetadataHub-mapping-SchemaOrg-Rec_'+StdVersion+'.html')
# new in 2018-03
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Sony Cameras ', 'Sony XDCAM & Planning', moreatlink, 13,'IPTC-VideoMetadataHub-mapping-SonyXDCAM-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Panasonic Cameras', 'Panasonic/SMPTE P2', moreatlink, 14,'IPTC-VideoMetadataHub-mapping-Panasonic-SMPTEP2-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - Canon Cameras', 'Canon VideoClip XML', moreatlink, 15,'IPTC-VideoMetadataHub-mapping-CanonVClip-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - exiftool', 'exiftool field id', moreatlink, 16,'IPTC-VideoMetadataHub-mapping-exiftool-Rec_'+StdVersion+'.html')
createSpecificMapping(valuesProp, 'IPTC Video Metadata Hub - Recommendation ' + StdVersion + HeaderAppendix + '/ Mapping VMHub - EIDR Data Fields 2.0', 'EIDR Data Fields 2.0', moreatlink, 17,'IPTC-VideoMetadataHub-mapping-EIDR-Rec_'+StdVersion+'.html')
if __name__ == '__main__':
main()
| 2.9375 | 3 |
imgtopdf/__init__.py | AVIPAGHADAR1729/imgtopdf | 0 | 12791384 |
from .imgtopdf import get_images_and_convert
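# Hypothetical usage sketch (the actual signature is defined in imgtopdf/imgtopdf.py):
#   from imgtopdf import get_images_and_convert
#   get_images_and_convert()  # assumed zero-argument entry point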
# https://towardsdatascience.com/how-to-build-your-first-python-package-6a00b02635c9 | 1.578125 | 2 |
scratch.py | satyarth934/DCASE2020_task1 | 0 | 12791385 | import glob
import numpy as np
from pprint import pprint as pp
g = glob.glob("data/*/*/audio/*.wav")
wavpath = g[:10]
pp(wavpath)
res = [("/".join(wp.split("/")[:-2]), "/".join(wp.split("/")[-2:])) for wp in wavpath]
pp(res) | 2.71875 | 3 |
database.py | jhuiry8/fdfsdsdf | 16 | 12791386 | from __future__ import annotations
from datetime import datetime
from typing import List, Dict, Union, Optional, TYPE_CHECKING
import pymongo
from pymongo import MongoClient
from pytz import timezone
import config
if TYPE_CHECKING:
import discord
JsonData = Dict[str, Union[str, int]]
cluster = MongoClient(config.mongo_client)
db: MongoClient = cluster[config.cluster_name]
collection: MongoClient = db[config.collection_name]
def daily_leaderboard() -> List[JsonData]:
    # Query once so the debug print and the return value share the same result
    results = list(collection.find({}).sort(
        "dailyTime", pymongo.DESCENDING)
    )[:10]
    print(results)
    return results
def weekly_leaderboard() -> List[JsonData]:
return list(collection.find({}).sort(
"weeklyTime", pymongo.DESCENDING)
)[:10]
def monthly_leaderboard() -> List[JsonData]:
return list(collection.find({}).sort(
"monthlyTime", pymongo.DESCENDING)
)[:10]
def member_leaderboard() -> List[JsonData]:
return list(collection.find({}).sort(
"memberTime", pymongo.DESCENDING)
)[:10]
def member_details(member_id) -> Optional[JsonData]:
    # find_one already returns None when no matching document exists
    return collection.find_one({"_id": member_id})
def resetDaily():
"""
Resets daily time of all members
"""
collection.update_many({}, {"$set": {"dailyTime": 0}})
def resetWeekly():
"""
Resets weekly time of all members
"""
collection.update_many({}, {"$set": {"weeklyTime": 0}})
def resetMonthly():
"""
Resets monthly time of all members.
"""
collection.update_many({}, {"$set": {"monthlyTime": 0}})
def end(member: discord.Member):
"""
Updates total Study time for members when they leave.
:param member:
The member that left the voice channel.
"""
now: datetime = datetime.now(timezone('Asia/Kolkata'))
now_str: str = now.strftime("%H:%M")
user = collection.find_one({"_id": str(member.id)})
join_time = str(user["startTime"])
join_hour, join_minutes = join_time.split(':')
join_minutes = int(join_hour) * 60 + int(join_minutes)
current_hour, current_minutes = now_str.split(':')
current_minutes = int(current_hour) * 60 + int(current_minutes)
if current_minutes < join_minutes:
daily_time = current_minutes
difference = (1440 - join_minutes) + current_minutes
weekly_time = current_minutes if int(now.weekday()) == 0 else difference
monthly_time = current_minutes if int(now.day) == 1 else difference
else:
difference = current_minutes - join_minutes
daily_time = difference
weekly_time = difference
monthly_time = difference
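    # Worked example of the midnight rollover above (illustrative numbers): a member
    # who joins at 23:50 (1430 min) and leaves at 00:10 (10 min) is credited
    # difference = (1440 - 1430) + 10 = 20 minutes toward memberTime.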
collection.update_one(
{"_id": str(member.id)},
{
"$inc": {
"memberTime": int(difference),
"monthlyTime": int(monthly_time),
"weeklyTime": int(weekly_time),
"dailyTime": int(daily_time)
}
}
)
collection.update_one(
{"_id": str(member.id)},
{"$set": {"startTime": 0}}
)
def update_join(member: discord.Member, _before_flag, _after_flag):
"""
Updates join data for existing members
:param member:
The member who joined the study channel
:param _before_flag:
The flag before the member joined the study channel
:param _after_flag:
The flag after the member joined the study channel
"""
now: str = datetime.now(timezone('Asia/Kolkata')).strftime("%H:%M")
collection.update_one(
{"_id": str(member.id)},
{
"$set": {
"startTime": now,
"name#": str(member.name + "#" + member.discriminator)
}
}
)
def add(member: discord.Member, _before_flag, _after_flag):
"""
Adds new entry in database for new members.
:param member:
The member who joined the study channel
:param _before_flag:
The flag before the member joined the study channel
:param _after_flag:
The flag after the member joined the study channel
"""
now: str = datetime.now(timezone('Asia/Kolkata')).strftime("%H:%M")
post = {
"_id": str(member.id),
"memberTime": 0,
"monthlyTime": 0,
"weeklyTime": 0,
"dailyTime": 0,
"startTime": now,
"name#": str(member.name + "#" + member.discriminator)
}
collection.insert_one(post)
def join(member: discord.Member, before_flag, after_flag):
"""
Called once member joins study channel.
:param member:
The member who joined the study channel
:param before_flag:
The flag before the member joined the study channel
:param after_flag:
The flag after the member joined the study channel
"""
if before_flag == after_flag:
return
    if collection.find_one({"_id": str(member.id)}) is None:
        add(member, before_flag, after_flag)
    else:
        update_join(member, before_flag, after_flag)
| 2.5625 | 3 |
datasets/ChEMBL_STRING/step_02.py | chao1224/SGNN-EBM | 0 | 12791387 |
import pandas as pd
import requests
import urllib
import argparse
import urllib.request
import xml.etree.ElementTree as ET
from multiprocessing import Pool
from tqdm import tqdm
from time import sleep
from requests.models import HTTPError
'''
http://www.uniprot.org/uniprot/O75713
http://www.uniprot.org/uniprot/D3DTF2
https://string-db.org/api/tsv/get_string_ids?identifiers=D3DTF2
https://string-db.org/api/json/network?identifiers=[your_identifiers]&[optional_parameters]
check this: https://string-db.org/cgi/access
'''
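# Illustrative example (accession O75713 taken from the docstring above) of the
# id-mapping endpoint used by query_stringid below:
#   https://version-11-0.string-db.org/api/xml/get_string_ids?identifiers=O75713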
parser = argparse.ArgumentParser()
parser.add_argument('--n-proc', type=int, default=12, help='number of processes to run when downloading assay & target information')
args = parser.parse_args()
def mapping_to_string_API(valid_string_set):
string_api_url = "https://version-11-0.string-db.org/api"
output_format = "tsv-no-header"
method = "network"
request_url = "/".join([string_api_url, output_format, method])
print('request_url\t', request_url)
valid_string_set = list(valid_string_set)
params = {
"identifiers": "%0d".join(valid_string_set), # your protein
"species": 9606, # species NCBI identifier
}
print('len of genes\t', len(valid_string_set))
response = requests.post(request_url, data=params)
print(response)
with open('string_ppi_score.tsv', 'w') as string_ppi_file:
pair_count, pos_pair_count = 0, 0
for line in response.text.strip().split("\n"):
l = line.strip().split("\t")
p1, p2 = '{}'.format(l[0]), '{}'.format(l[1])
experimental_score = float(l[10])
# print("\t".join([p1, p2, "experimentally confirmed (prob. %.3f)" % experimental_score]))
print('{}\t{}\t{}'.format(p1, p2, experimental_score), file=string_ppi_file)
pair_count += 1
if experimental_score > 0.2:
pos_pair_count += 1
print(pair_count, '\t', pos_pair_count)
print()
def query_stringid(uniprot):
website = 'https://version-11-0.string-db.org/api/xml/get_string_ids?identifiers={}'.format(uniprot)
try:
with urllib.request.urlopen(website) as conn:
data = conn.read().decode("utf-8")
except HTTPError:
data = ''
if data:
root = ET.fromstring(data)
string_id_result = root.find('record/stringId')
if string_id_result is not None:
return string_id_result.text
print('error on {}: {}'.format(uniprot, data))
return ''
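# Illustrative call (the identifier is a made-up example, not from this script):
# query_stringid('P05067') requests
# https://version-11-0.string-db.org/api/xml/get_string_ids?identifiers=P05067
# and returns the text of the first record's stringId, or '' on an HTTP error.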
def store_mapping_from_uniprot_to_string_id(uniprot_set):
print('Storing mapping from uniprot to string to uniprot2string.tsv...')
with Pool(args.n_proc) as p:
string_id_set = p.map(query_stringid, tqdm(uniprot_set))
num_errors = 0
with open('uniprot_without_strid.txt', 'w') as r, open('uniprot2string.tsv', 'w') as g:
for uniprot, string_id in zip(uniprot_set, string_id_set):
if string_id:
g.write('{}\t{}\n'.format(uniprot, string_id))
else:
r.write(uniprot + '\n')
num_errors += 1
print('Done storing. Number of errors: {}. Mapped uniprots: {}'.format(num_errors, len(uniprot_set) - num_errors))
if __name__ == '__main__':
'''
Assay ID\tTarget ID\tTarget Name\tOrganism\t{UniProt list}
'''
assay2target_fname = 'assay2target.tsv'
uniprot_set = set()
with open(assay2target_fname, 'r') as assay2target_file:
assay2target_file.readline()
for line in assay2target_file:
line = line.strip().split('\t')
uniprot_list = line[-1].strip().split(',')
# print(uniprot_list)
for uniprot in uniprot_list:
if len(uniprot) != 6:
continue
uniprot_set.add(uniprot)
store_mapping_from_uniprot_to_string_id(uniprot_set)
with open('uniprot2string.tsv', 'r') as uniprot2string_file:
valid_string_set = set()
for line in uniprot2string_file:
line = line.strip().split('\t')
uniprot = line[0]
string_id = line[1]
valid_string_set.add(string_id)
mapping_to_string_API(valid_string_set)
| 2.84375 | 3 |
union-find-by-rank.py | zLianK/algorithms | 0 | 12791388 | <gh_stars>0
from collections import defaultdict
# The structure to represent the graph
class Graph:
def __init__(self, vertices):
self.V = vertices
self.edges = defaultdict(list)
def add_edge(self, u, v):
self.edges[u].append(v)
# The structure to represent a subset
class Subset:
def __init__(self, parent, rank):
self.parent = parent
self.rank = rank
# This function unites the sets containing u and v
# The root with the bigger rank becomes the parent of the one with the smaller rank
# If both ranks are the same, then make one the parent of the other
# and increment its rank by one
def union(subsets, u, v):
if subsets[u].rank > subsets[v].rank:
subsets[v].parent = u
elif subsets[v].rank > subsets[u].rank:
subsets[u].parent = v
else:
subsets[v].parent = u
subsets[u].rank += 1
# Find the set's parent and make the path compression if needed
def find(subsets, node):
if subsets[node].parent != node:
subsets[node].parent = find(subsets, subsets[node].parent)
return subsets[node].parent
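# Worked example (illustrative, not part of the original script): for a parent
# chain 3 -> 2 -> 1 -> 0 (0 is the root), find(subsets, 3) returns 0 and, via
# path compression, re-points 3, 2 and 1 directly at 0, so later finds on any
# of them resolve in a single step.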
# Check if there is a cycle in the graph
def is_cycle(graph):
subsets = []
for u in range(graph.V):
subsets.append(Subset(u, 0))
    # Iterate over all edges of the graph
    # If the roots of both vertices are the same
    # then there is a cycle
    for i in graph.edges:
        for j in graph.edges[i]:
            # Re-find the root of i on every step, since a previous union
            # may have changed it
            x = find(subsets, i)
            y = find(subsets, j)
            if x == y:
                return True
            union(subsets, x, y)
    return False
def main():
g = Graph(6)
g.add_edge(0, 1)
g.add_edge(0, 4)
g.add_edge(1, 2)
g.add_edge(1, 4)
g.add_edge(2, 3)
g.add_edge(3, 4)
g.add_edge(3, 5)
if is_cycle(g):
print('Cycle')
else:
print('Not Cycle')
if __name__ == '__main__':
main()
| 3.390625 | 3 |
code/decoder.py | JacksonFrank/GEMSECDataPipelining | 0 | 12791389 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 11:41:59 2019
@author: gemsec-user
"""
import numpy as np
import prody as pd
import PeptideBuilder as pb
import os
import Bio
import cleaning
from decimal import Decimal
from symbols import *
#from pdbtools import pdbtools as pdb
d = os.getcwd()
parsed_aa = {}
# parses peptides and creates file structures to store these peptides
# stores a dictionary with peptide keys that map to the atoms that make it up
def parse_aa():
if not os.path.exists(d + '/amino_acids'):
os.mkdir(d + '/amino_acids')
global parsed_aa
for amino in AA:
# out writes information to files
out = Bio.PDB.PDBIO()
# i is a peptide structure from amino acid
i = pb.make_structure(amino, [180]*len(amino),[180]*len(amino))
out.set_structure(i)
out.save(d + '/amino_acids/' + amino + '.pdb')
cleaning.cleanATOM(d + '/amino_acids/' + amino + '.pdb', out_file= d + '/amino_acids/' + amino + '.pdb', ext = '.pdb')
temp = pd.parsePDB(d + '/amino_acids/' + amino + ".pdb")
# maps amino acids to their atoms
parsed_aa[amino] = []
for atom in temp.iterAtoms():
parsed_aa[amino].append(str(atom.getName()))
# what are nodes? (2d array)
# returns the atoms from the given nodes
def remove_padding(nodes):
atoms = []
current = 0
    # gets the current column of the first 5 rows
col = nodes[0:5, current]
while sum(col) != 0:
# adds the element index of the current node column
atoms.append(ELEMENT_INDEX[col.tolist().index(1.0)])
current += 1
col = nodes[0:5, current]
return atoms
# checks the rate of correctness in heuristic efficiency
def heuristic(index, node, amino_acid):
correct = 0
total = 0
for atom in parsed_aa[amino_acid]:
if (index+total) < len(node) and ELEMENT_SYMBOLS[int(node[index+total][0]) - 1] == atom[0]:
correct += 1
total += 1
return float(correct/total)
# finds all possible sequences of amino acid sequences keyed to heuristic efficiency values
def find_sequence_recurs(nodes, length, current_ind, current_sequence, current_value):
if len(parsed_aa.keys()) == 0:
parse_aa()
# adds the given value and sequence to the possible sequences dictionary
if len(current_sequence) == length:
global POSSIBLE_SEQUENCES
if current_value in POSSIBLE_SEQUENCES:
POSSIBLE_SEQUENCES[current_value].append(current_sequence)
else:
POSSIBLE_SEQUENCES[current_value] = [current_sequence]
values = []
for a in AA:
values.append(heuristic(current_ind,nodes, a))
max_value = max(values)
if max_value > 0.8:
for i in range(len(values)):
if max_value == values[i]:
amino = AA[i]
find_sequence_recurs(nodes, length, current_ind + len(parsed_aa[amino]), current_sequence + amino, current_value + max_value)
# returns a string of whitespace specified
def find_white_space(total_space, text):
return ' '*(total_space - len(text))
POSSIBLE_SEQUENCES = {}  # dict of heuristic value -> sequences; initialized as a dict so find_sequence_recurs can populate it
# what are nodes?
# decodes information into a pdb file
# what does encoding look like?
def decode(encoding, save_loc = d, save_name = '', find_coord = False, use_coord = False):
if len(parsed_aa.keys()) == 0:
parse_aa()
if save_name == '':
save_name = encoding['sequence'] + '.pdb'
placed = []
new_nodes = remove_padding(encoding['index'])
if not use_coord:
D = encoding['secondary']
placed.append([new_nodes[0], (0,0,0)])
placed.append([new_nodes[1], (D[0,1],0,0)])
x = (D[1,2]**2 - D[0,2]**2 - D[0,1]**2)/(-2 * D[0,1])
y = (abs(D[0,2]**2 - x**2))**(0.5)
placed.append([new_nodes[2], (x,y,0)])
P = placed[2][1][0]**2 + placed[2][1][1]**2
for i in range(3,len(new_nodes)):
x = (D[1,i]**2 - D[0,i]**2 - D[0,1]**2)/(-2*D[0,1])
y = (D[2,i]**2 - D[0,i]**2 - P + (2*x*placed[2][1][0]))/(-2*placed[2][1][1])
z = (abs(D[0,i]**2 - x**2 - y**2))**(0.5)
placed.append([new_nodes[i], (x,y,z)])
if find_coord:
final = np.zeros((len(encoding['secondary'][0]),3))
for i in range(len(placed)):
final[i, 0] = placed[i][1][0]
final[i, 1] = placed[i][1][1]
final[i, 2] = placed[i][1][2]
return final
else:
for i in range(3,len(new_nodes)):
placed.append([new_nodes[i], (encoding['coordinates'][i][0],encoding['coordinates'][i][1],encoding['coordinates'][i][2])])
with open(save_loc + '/' + save_name, 'w+') as g:
counter = 0
amino_num = 0
for i in range(len(placed)):
if counter == 0:
counter = len(parsed_aa[encoding['ele_to_amino'][i][1]])
amino_num += 1
string = 'ATOM' #+ str(i + 1) + ' '+ encoding['seq_to_atoms'][i][0]
string += find_white_space(7, str(i + 1)) + str(i+1) + ' '
string += encoding['ele_to_amino'][i][0] + find_white_space(4, encoding['ele_to_amino'][i][0])
string += AA3[AA.index(encoding['ele_to_amino'][i][1])] + ' A'
string += find_white_space(4, str(amino_num)) + str(amino_num)
string += find_white_space(12, str(round(Decimal(placed[i][1][0]), 3))) + str(round(Decimal(placed[i][1][0]), 3))
string += find_white_space(8, str(round(Decimal(placed[i][1][1]), 3))) + str(round(Decimal(placed[i][1][1]), 3))
string += find_white_space(8, str(round(Decimal(placed[i][1][2]), 3))) + str(round(Decimal(placed[i][1][2]), 3))
string += ' 1.00 0.00'
string += find_white_space(11, placed[i][0]) + placed[i][0]
g.write(string + '\n')
counter -= 1
return save_loc + '/' + save_name
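# Geometry note (illustrative, added for clarity): the placement loop in
# decode() is classical trilateration from the distance matrix D. Atom 0 is
# pinned at the origin, atom 1 on the x-axis at (D[0,1], 0, 0), atom 2 in the
# xy-plane; every later atom i is then recovered from its distances to those
# three anchors, e.g. x_i = (D[1,i]**2 - D[0,i]**2 - D[0,1]**2) / (-2 * D[0,1]),
# which is the same equation used for x above.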
| 2.78125 | 3 |
webservice/server/server/summ_eval/server/cli/__init__.py | mymusise/emnlp19-moverscore | 141 | 12791390 | def main():
from summ_eval.server import EvalServer
from summ_eval.server.helper import get_run_args
args = get_run_args()
server = EvalServer(args)
server.start()
    server.join()
| 1.484375 | 1 |
tests/test_target.py | projectsyn/commodore | 39 | 12791391 | """
Unit-tests for target generation
"""
import os
import click
import pytest
from pathlib import Path as P
from textwrap import dedent
from commodore import cluster
from commodore.inventory import Inventory
from commodore.config import Config
@pytest.fixture
def data():
"""
Setup test data
"""
tenant = {
"id": "mytenant",
"displayName": "My Test Tenant",
}
cluster = {
"id": "mycluster",
"displayName": "My Test Cluster",
"tenant": tenant["id"],
"facts": {
"distribution": "rancher",
"cloud": "cloudscale",
},
"dynamicFacts": {
"kubernetes_version": {
"major": "1",
"minor": "21",
"gitVersion": "v1.21.3",
}
},
"gitRepo": {
"url": "ssh://[email protected]/cluster-catalogs/mycluster",
},
}
return {
"cluster": cluster,
"tenant": tenant,
}
def cluster_from_data(data) -> cluster.Cluster:
return cluster.Cluster(data["cluster"], data["tenant"])
def _setup_working_dir(inv: Inventory, components):
for cls in components:
defaults = inv.defaults_file(cls)
os.makedirs(defaults.parent, exist_ok=True)
defaults.touch()
component = inv.component_file(cls)
os.makedirs(component.parent, exist_ok=True)
component.touch()
def test_render_bootstrap_target(tmp_path: P):
components = ["foo", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(inv, "cluster", ["foo", "bar", "baz"])
classes = [
"params.cluster",
"defaults.foo",
"defaults.bar",
"global.commodore",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["_instance"] == "cluster"
def test_render_target(tmp_path: P):
components = ["foo", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(inv, "foo", ["foo", "bar", "baz"])
classes = [
"params.cluster",
"defaults.foo",
"defaults.bar",
"global.commodore",
"components.foo",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["kapitan"]["vars"]["target"] == "foo"
assert target["parameters"]["_instance"] == "foo"
def test_render_aliased_target(tmp_path: P):
components = ["foo", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(inv, "fooer", ["foo", "bar", "baz"], component="foo")
classes = [
"params.cluster",
"defaults.foo",
"defaults.bar",
"global.commodore",
"components.foo",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["kapitan"]["vars"]["target"] == "fooer"
assert target["parameters"]["foo"] == "${fooer}"
assert target["parameters"]["_instance"] == "fooer"
def test_render_aliased_target_with_dash(tmp_path: P):
components = ["foo-comp", "bar"]
inv = Inventory(work_dir=tmp_path)
_setup_working_dir(inv, components)
target = cluster.render_target(
inv, "foo-1", ["foo-comp", "bar", "baz"], component="foo-comp"
)
classes = [
"params.cluster",
"defaults.foo-comp",
"defaults.bar",
"global.commodore",
"components.foo-comp",
]
assert target != ""
print(target)
assert len(target["classes"]) == len(
classes
), "rendered target includes different amount of classes"
for i in range(len(classes)):
assert target["classes"][i] == classes[i]
assert target["parameters"]["kapitan"]["vars"]["target"] == "foo-1"
assert target["parameters"]["foo_comp"] == "${foo_1}"
assert target["parameters"]["_instance"] == "foo-1"
def test_render_params(data, tmp_path: P):
cfg = Config(work_dir=tmp_path)
target = cfg.inventory.bootstrap_target
params = cluster.render_params(cfg.inventory, cluster_from_data(data))
assert "parameters" in params
params = params["parameters"]
assert "cluster" in params
assert "name" in params["cluster"]
assert params["cluster"]["name"] == "mycluster"
assert target in params
target_params = params[target]
assert "name" in target_params
assert target_params["name"] == "mycluster"
assert "display_name" in target_params
assert target_params["display_name"] == "My Test Cluster"
assert "catalog_url" in target_params
assert (
target_params["catalog_url"]
== "ssh://[email protected]/cluster-catalogs/mycluster"
)
assert "tenant" in target_params
assert target_params["tenant"] == "mytenant"
assert "tenant_display_name" in target_params
assert target_params["tenant_display_name"] == "My Test Tenant"
assert "dist" in target_params
assert target_params["dist"] == "rancher"
assert "facts" in params
assert params["facts"] == data["cluster"]["facts"]
assert "dynamic_facts" in params
dyn_facts = params["dynamic_facts"]
assert "kubernetes_version" in dyn_facts
k8s_ver = dyn_facts["kubernetes_version"]
assert "major" in k8s_ver
assert "minor" in k8s_ver
assert "gitVersion" in k8s_ver
assert "1" == k8s_ver["major"]
assert "21" == k8s_ver["minor"]
assert "v1.21.3" == k8s_ver["gitVersion"]
assert "cloud" in params
assert "provider" in params["cloud"]
assert params["cloud"]["provider"] == "cloudscale"
assert "customer" in params
assert "name" in params["customer"]
assert params["customer"]["name"] == "mytenant"
def test_missing_facts(data, tmp_path: P):
data["cluster"]["facts"].pop("cloud")
cfg = Config(work_dir=tmp_path)
with pytest.raises(click.ClickException):
cluster.render_params(cfg.inventory, cluster_from_data(data))
def test_empty_facts(data, tmp_path: P):
data["cluster"]["facts"]["cloud"] = ""
cfg = Config(work_dir=tmp_path)
with pytest.raises(click.ClickException):
cluster.render_params(cfg.inventory, cluster_from_data(data))
def test_read_cluster_and_tenant(tmp_path):
cfg = Config(work_dir=tmp_path)
file = cfg.inventory.params_file
os.makedirs(file.parent, exist_ok=True)
with open(file, "w") as f:
f.write(
dedent(
"""
parameters:
cluster:
name: c-twilight-water-9032
tenant: t-delicate-pine-3938"""
)
)
cluster_id, tenant_id = cluster.read_cluster_and_tenant(cfg.inventory)
assert cluster_id == "c-twilight-water-9032"
assert tenant_id == "t-delicate-pine-3938"
def test_read_cluster_and_tenant_missing_fact(tmp_path):
inv = Inventory(work_dir=tmp_path)
file = inv.params_file
os.makedirs(file.parent, exist_ok=True)
with open(file, "w") as f:
f.write(
dedent(
"""
classes: []
parameters: {}"""
)
)
with pytest.raises(KeyError):
cluster.read_cluster_and_tenant(inv)
| 2.3125 | 2 |
raster_info_car.py | leandromet/Geoprocessamento---Geoprocessing | 2 | 12791392 | <filename>raster_info_car.py
#-------------------------------------------------------------------------------
# Name: Raster information from vector relations
# Purpose: Classify features of interest based on a raster with pixels that have classification values.
#          Having a catalog in a vector layer with addresses of images related to each polygon, count
#          the pixels with given values that are inside any given polygon. The raster files have a
#          land usage classification that was automatically generated; this classification covers the
#          whole country. We have rural property boundaries and other polygons for which we want to
#          verify how much area was classified as one of 13 distinct classes. This approach gets each
# image boundary polygon intersection with each feature of interest and builds a raster mask.
# The mask has the same resolution as the original image (RapidEye, 5 meters) with binary values,
# being 1 if the pixel is part of the intersection and 0 if it is not. This mask is then multiplied
# as a matrix by the matrix of pixel values from the image (in this case 14 possible values).
# Finally a histogram is made with bins that separate the intended classes and the count of
# each bin is added to the vector layer with features of interest.
#
# Author: leandro.biondo
#
# Created: 05/10/2016
# Copyright: (c) leandro.biondo 2016
# Licence: GNU GLP
#-------------------------------------------------------------------------------
#!/usr/bin/env python
# import modules
import gdal
import numpy as np
from osgeo import ogr, osr
import glob
import os
gdal.UseExceptions()
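# The core of the approach below, in isolation (sketch only -- the bin edges
# and the 25 m2-per-pixel factor come from this script, the variable names do not):
#   mask = rasterized_intersection        # 0/1 array, same shape as the image
#   masked = classes * mask               # keeps class values inside the polygon
#   counts = np.histogram(masked, bins=[0,1,2,3,4,5,6,7,8,9,10,11,12,20])[0]
#   areas_m2 = counts * 25                # 5 m x 5 m pixels -> 25 m2 each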
#
#shapefilebr = "C:/biondo/buff_nasc.shp"
#driver = ogr.GetDriverByName("ESRI Shapefile")
#dataSourcebr = driver.Open(shapefilebr, True)
#layerbr = dataSourcebr.GetLayer()
#Here should be given the vector layer with the catalog. This catalog can be built with the QGIS plugin
#"Image Footprint"; it is necessary to select the image boundary option. The path (caminho) field will be used to open
#the images with classified pixels; you can use a * as a mask if there is more than one catalog
for infile in glob.glob(r'/home/gecad/CAR/Demandas/Nascentes/aaa_nascentes_catalogo.shp'):
print infile
rapideye = infile
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource_rd = driver.Open(rapideye, True)
layer_rd = dataSource_rd.GetLayer()
shapefile = ('/home/gecad/CAR/Demandas/Nascentes/aaa_nascentes_catalogo.shp')
dataSource = driver.Open(shapefile, True)
layer = dataSource.GetLayer()
layer.CreateField(ogr.FieldDefn("indef", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("uso_cons", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("rvegnat", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("vereda", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("mangue", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("salgado", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("apicum", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("restinga", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("agua", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("vegremo", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("regene", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("areaurb", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("nuvens", ogr.OFTInteger),False)
layer.CreateField(ogr.FieldDefn("foraLi", ogr.OFTInteger),False)
pixel_size = 5
NoData_value = 255
contard =0
c5=0
for feat_rd in layer_rd:
caminho_img = feat_rd.GetField("caminho")
print caminho_img
try:
src_ds = gdal.Open( caminho_img)
except RuntimeError, e:
print 'Unable to open INPUT'
print e
#break
continue
try:
srcband = src_ds.GetRasterBand(1)
print srcband
except RuntimeError, e:
# for example, try GetRasterBand(10)
            print 'Band ( 1 ) not found'
print e
#sys.exit(1)
continue
banda_class = srcband.ReadAsArray().astype(np.float)
if banda_class.size==(5000*5000):
classes = banda_class
geom=feat_rd.GetGeometryRef()
#print 'spat ', layer_rd.GetSpatialRef()
# print 'proj ', src_ds.GetProjection()
contorno=geom.GetEnvelope()
x_min = contorno[0]
y_max = contorno[3]
x_res = 5000
y_res = 5000
# target_ds = gdal.GetDriverByName('MEM').Create('', x_res, y_res, gdal.GDT_Byte)
# target_ds.SetGeoTransform(src_ds.GetGeoTransform())
# target_ds.SetProjection(src_ds.GetProjection())
# band = target_ds.GetRasterBand(1)
# band.SetNoDataValue(NoData_value)
#
contard=contard+1
conta=0
cont_loop=0
for feature in layer:
geom2=feature.GetGeometryRef()
verifica_f=feature.GetField("foraLi")
#print 'feat' , caminho_feat
#print verifica_f
cont_loop+=1
if geom2.Intersects(geom) :
c5+=1
if (verifica_f is None):
intersect = geom.Intersection(geom2)
print intersect.GetArea()
print (intersect.GetArea()/geom2.GetArea())
if (intersect.GetArea()/geom2.GetArea())<0.5:
continue
conta+=1
SpatialRef = osr.SpatialReference()
SpatialRef.SetWellKnownGeogCS( "EPSG:4674" )
memoutdriver=ogr.GetDriverByName('MEMORY')
memsource=memoutdriver.CreateDataSource('memData')
tmp=memoutdriver.Open('memData', 1)
dstlayer = memsource.CreateLayer('teste', SpatialRef)
target_ds = gdal.GetDriverByName('MEM').Create('', x_res, y_res, gdal.GDT_Byte)
target_ds.SetGeoTransform(src_ds.GetGeoTransform())
target_ds.SetProjection(src_ds.GetProjection())
band = target_ds.GetRasterBand(1)
band.SetNoDataValue(NoData_value)
dstfeature = ogr.Feature(dstlayer.GetLayerDefn())
dstfeature.SetGeometry(intersect)
dstlayer.CreateFeature(dstfeature)
# print 'resultado', dstfeature.GetGeometryRef().GetEnvelope()
# Rasterize
gdal.RasterizeLayer(target_ds, [1], dstlayer, burn_values=[1])
array = band.ReadAsArray()
#print np.histogram(array, bins=[0,1,250,300])
# Read as array
dstlayer=None
memsource.Destroy()
#tabela = srcband.ReadAsArray()
#print tabela
resposta1 = np.histogram(classes, bins=[0,1,20])
classes2 = classes*array
resposta = np.histogram(classes2, bins=[0,1,2,3,4,5,6,7,8,9,10,11,12,20])
feature.SetField("indef", int(resposta1[0][0]*25))
feature.SetField("uso_cons", int(resposta[0][1]*25))
feature.SetField("rvegnat", int(resposta[0][2]*25))
feature.SetField("vereda", int(resposta[0][3]*25))
feature.SetField("mangue", int(resposta[0][4]*25))
feature.SetField("salgado", int(resposta[0][5]*25))
feature.SetField("apicum", int(resposta[0][6]*25))
feature.SetField("restinga", int(resposta[0][7]*25))
feature.SetField("agua", int(resposta[0][8]*25))
feature.SetField("vegremo", int(resposta[0][9]*25))
feature.SetField("regene", int(resposta[0][10]*25))
feature.SetField("areaurb", int(resposta[0][11]*25))
feature.SetField("nuvens", int(resposta[0][12]*25))
feature.SetField("foraLi", int((resposta[0][0]-resposta1[0][0])*25))
layer.SetFeature(feature)
feature.Destroy()
print "ImagemImovel: %d | %d | %d | %d" % (c5,contard,conta,cont_loop)
c5+=1
#create an image file and put the results in 3 band for testing purposes
#
# saida = "/home/gecad/CAR/Demandas/Nascentes/img_testes/img%d%d.tif" % (contard,c5)
# format = "GTiff"
# driver2 = gdal.GetDriverByName( format )
# metadata = driver2.GetMetadata()
# if metadata.has_key(gdal.DCAP_CREATE) \
# and metadata[gdal.DCAP_CREATE] == 'YES':
# print 'Driver %s supports Create() method.' % format
# if metadata.has_key(gdal.DCAP_CREATECOPY) \
# and metadata[gdal.DCAP_CREATECOPY] == 'YES':
# print 'Driver %s supports CreateCopy() method.' % format
#
# dst_ds = driver2.Create( saida, 5000, 5000, 3, gdal.GDT_Float32, ['COMPRESS=LZW'] )
# srs = osr.SpatialReference()
# dst_ds.SetProjection(src_ds.GetProjection())
# dst_ds.SetGeoTransform(src_ds.GetGeoTransform())
#
# dst_ds.GetRasterBand(1).WriteArray(classes)
# dst_ds.GetRasterBand(2).WriteArray(array)
# dst_ds.GetRasterBand(3).WriteArray(classes2)
# dst_ds=None
# #
# if c5==10:
# layer=None
# dataSource=None
# layerbr=None
# dataSourcebr=None
# layer_rd=None
# dataSource_rd=None
# target_ds= None
# print 'fim forcado'
# break
#
target_ds= None
#break
layer.ResetReading()
layer=None
dataSource=None
layerbr=None
dataSourcebr=None
layer_rd=None
dataSource_rd=None
print 'fim'
| 3.390625 | 3 |
media.py | PatrickO10/movie_trailers | 0 | 12791393 | import webbrowser
class Movie():
'''This is a class for storing information about movies.'''
def __init__(self, movie_title, movie_year, poster_image, trailer_youtube, movie_rating):
self.title = movie_title
self.year = movie_year
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.rating = movie_rating
def show_trailer(self):
'''This method opens a youtube url.'''
webbrowser.open(self.trailer_youtube_url)
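# Illustrative usage (all argument values are placeholders):
# movie = Movie("Example Film", "2016", "https://example.com/poster.jpg",
#               "https://www.youtube.com/watch?v=EXAMPLE", "PG")
# movie.show_trailer()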
| 3.59375 | 4 |
msf_2022/io/read.py | molssi-workshops/msf_sample_2022 | 0 | 12791394 | <gh_stars>0
"""
Functions for reading molecular files
"""
import numpy as np
import matplotlib.pyplot as plt
def read_pdb(f_loc: str) -> tuple[list[str], np.ndarray]:
# This function reads in a pdb file and returns the atom names and coordinates.
with open(f_loc) as f:
data = f.readlines()
c = []
sym = []
for l in data:
if "ATOM" in l[0:6] or "HETATM" in l[0:6]:
sym.append(l[76:79].strip())
c2 = [float(x) for x in l[30:55].split()]
c.append(c2)
coords = np.array(c)
return sym, coords
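# Note on the slices above (descriptive, based on the fixed-width PDB format):
# characters 31-55 of an ATOM/HETATM record hold the x, y, z coordinates and
# characters 77-79 hold the element symbol, which is what l[30:55] and
# l[76:79] extract.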
def read_xyz(file_location):
#Open an xyz file and return symbols and coordinates
xyz_file = np.genfromtxt(fname=file_location, skip_header=2, dtype="unicode")
symbols = xyz_file[:, 0]
coords = xyz_file[:, 1:]
    coords = coords.astype(float)  # np.float was removed from newer NumPy releases; builtin float is equivalent here
    return symbols, coords
| 2.921875 | 3 |
stats/constants.py | mpope9/nba-sql | 113 | 12791395 | <gh_stars>100-1000
"""
Constants used in the application.
"""
"""
List of seasons.
"""
season_list = [
'1996-97',
'1997-98',
'1998-99',
'1999-00',
'2000-01',
'2001-02',
'2002-03',
'2003-04',
'2004-05',
'2005-06',
'2006-07',
'2007-08',
'2008-09',
'2009-10',
'2010-11',
'2011-12',
'2012-13',
'2013-14',
'2014-15',
'2015-16',
'2016-17',
'2017-18',
'2018-19',
'2019-20',
'2020-21',
'2021-22'
]
"""
Headers.
"""
headers = {
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'x-nba-stats-token': 'true',
'User-Agent': (
#'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) '
#'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130'
#'Safari/537.36'
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
),
'x-nba-stats-origin': 'stats',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Referer': 'https://stats.nba.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
}
"""
Team IDs. (Thank you nba-api).
"""
team_ids = [
1610612737, # 'ATL'
1610612738, # 'BOS'
1610612739, # 'CLE'
1610612740, # 'NOP'
1610612741, # 'CHI'
1610612742, # 'DAL'
1610612743, # 'DEN'
1610612744, # 'GSW'
1610612745, # 'HOU'
1610612746, # 'LAC'
1610612747, # 'LAL'
1610612748, # 'MIA'
1610612749, # 'MIL'
1610612750, # 'MIN'
1610612751, # 'BKN'
1610612752, # 'NYK'
1610612753, # 'ORL'
1610612754, # 'IND'
1610612755, # 'PHI'
1610612756, # 'PHX'
1610612757, # 'POR'
1610612758, # 'SAC'
1610612759, # 'SAS'
1610612760, # 'OKC'
1610612761, # 'TOR'
1610612762, # 'UTA'
1610612763, # 'MEM'
1610612764, # 'WAS'
1610612765, # 'DET'
1610612766, # 'CHA'
]
"""
Mapping from team abbrev to id.
"""
team_abbrev_mapping = {
'ATL': 1610612737,
'BOS': 1610612738,
'CLE': 1610612739,
'NOP': 1610612740,
'NOK': 1610612740, # Old name.
'NOH': 1610612740, # Old name.
'CHI': 1610612741,
'DAL': 1610612742,
'DEN': 1610612743,
'GSW': 1610612744,
'HOU': 1610612745,
'LAC': 1610612746,
'LAL': 1610612747,
'MIA': 1610612748,
'MIL': 1610612749,
'MIN': 1610612750,
'BKN': 1610612751,
'NJN': 1610612751, # Old name.
'NYK': 1610612752,
'ORL': 1610612753,
'IND': 1610612754,
'PHI': 1610612755,
'PHX': 1610612756,
'POR': 1610612757,
'SAC': 1610612758,
'SAS': 1610612759,
'OKC': 1610612760,
'SEA': 1610612760,
'TOR': 1610612761,
'UTA': 1610612762,
'VAN': 1610612763, # Old name.
'MEM': 1610612763,
'WAS': 1610612764,
'DET': 1610612765,
'CHA': 1610612766,
'CHH': 1610612766, # Old name.
}
"""
Play-by-play data has an EventMsgType field. This is an enum. There
is also the EventMsgActionField, which is a complex enum of
(EventMsgType, SubType).
We're going to make a lookup table of enum to value, then a lookup
table for the (EventMsgType, EventMsgActionType) pair.
"""
event_message_types = [
{'id': 1, 'string': 'FIELD_GOAL_MADE'},
{'id': 2, 'string': 'FIELD_GOAL_MISSED'},
{'id': 3, 'string': 'FREE_THROW'},
{'id': 4, 'string': 'REBOUND'},
{'id': 5, 'string': 'TURNOVER'},
{'id': 6, 'string': 'FOUL'},
{'id': 7, 'string': 'VIOLATION'},
{'id': 8, 'string': 'SUBSTITUTION'},
{'id': 9, 'string': 'TIMEOUT'},
{'id': 10, 'string': 'JUMP_BALL'},
{'id': 11, 'string': 'EJECTION'},
{'id': 12, 'string': 'PERIOD_BEGIN'},
{'id': 13, 'string': 'PERIOD_END'},
{'id': 18, 'string': 'UNKNOWN'}
]
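# A lookup table can be derived from the list above, e.g. (sketch, not part of
# the original module):
#   EVENT_TYPE_BY_ID = {e['id']: e['string'] for e in event_message_types}
#   EVENT_TYPE_BY_ID[4]  # -> 'REBOUND'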
| 1.773438 | 2 |
kokoropy/scaffolding/scaffold_cms/controllers/page.py | goFrendiAsgard/kokoropy | 5 | 12791396 | from kokoropy.controller import Crud_Controller
from ..models._all import Page
class Page_Controller(Crud_Controller):
__model__ = Page
Page_Controller.publish_route()
| 1.601563 | 2 |
Leetcode/Solutions/Find_First_and_Last_Position_of_Element_in_Sorted_Array.py | fakecoinbase/sweetpandslashAlgorithms | 3 | 12791397 | <reponame>fakecoinbase/sweetpandslashAlgorithms
# Question: Given a sorted array, find the first and last indices of a target number
# Solution: Run two (lower bounded) binary searches, one for the target number and one
# for the successor of the target (the next natural number after the targer number)
# Difficulty: Medium
from typing import List
def searchRange(nums: List[int], target: int) -> List[int]:
def lowerBin(nums, target):
l, r = 0, len(nums) - 1
# Note: setting this while statement to be <= and not just != means it can also
# catch cases when the input is an empty array, as l = 0 and r = -1 in that case
while l <= r:
# In each iteration set the midpoint to half the difference of the left
# and right pointers offset by the left pointer
mid = (r - l) // 2 + l
# This binary search always returns the lower bound on a number because
# if the current number is less than target it shifts left to the next number to the right of mid,
# and if the number is greater than or equal to the target it shifts right to the number to the left of mid
# this ensures that if numbers are duplicated the search will always narrow into the leftmost number
if nums[mid] < target: l = mid + 1
else: r = mid - 1
return l
# This simply finds the index of the lowest target
    lowerIndex = lowerBin(nums, target)
# This finds the index of the first number larger than the target, and then subtracts
# one from the index it finds which is going to be the rightmost target
    upperIndex = lowerBin(nums, target + 1) - 1
# If we didn't go out of bounds in our search and if the number at the lowerIndex actually equals our
# target (because our binary search will return the next largest number if it didn't exist) we can return the indices
if lowerIndex < len(nums) and nums[lowerIndex] == target:
return [lowerIndex, upperIndex]
else: return [-1, -1]
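# Hypothetical usage sketch (values chosen for illustration):
# searchRange([5, 7, 7, 8, 8, 10], 8) -> [3, 4]
# searchRange([5, 7, 7, 8, 8, 10], 6) -> [-1, -1]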
| 3.78125 | 4 |
example/kd_loss.py | chris4540/DT2119-Final-Project | 1 | 12791398 | <gh_stars>1-10
import torch
# from torch.autograd import Variable
import torch.nn as nn
# import torch.nn.functional as F
import numpy as np
from torch.nn.utils.rnn import pad_sequence
from torch.nn.functional import softmax
from torch.nn.functional import log_softmax
if __name__=='__main__':
logits1 = -np.random.rand(7,3)
logits2 = -np.random.rand(5,3)
logits3 = -np.random.rand(7,3)
logits4 = -np.random.rand(5,3)
#
logits1 = torch.Tensor(logits1)
logits2 = torch.Tensor(logits2)
logits3 = torch.Tensor(logits3)
logits4 = torch.Tensor(logits4)
teacher_logits = pad_sequence([logits1, logits2])
student_logits = pad_sequence([logits3, logits2])
    kd_loss = nn.KLDivLoss(reduction='none')(  # the legacy 'reduce' kwarg is deprecated; 'reduction' alone suffices
log_softmax(student_logits, dim=-1),
softmax(teacher_logits, dim=-1))
print(kd_loss)
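    # A common extension (not in the original script): soften both
    # distributions with a temperature T and rescale the loss by T**2,
    # as in standard knowledge distillation. T = 2.0 is an assumed value.
    T = 2.0
    kd_loss_temp = nn.KLDivLoss(reduction='batchmean')(
        log_softmax(student_logits / T, dim=-1),
        softmax(teacher_logits / T, dim=-1)) * (T * T)
    print(kd_loss_temp)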
# kd_loss = nn.KLDivLoss(reduction='batchmean')(
# log_softmax(student_logits, dim=-1),
# softmax(teacher_logits, dim=-1))
# print(kd_loss)
# kd_loss = nn.KLDivLoss(reduction='sum')(
# log_softmax(student_logits, dim=-1),
# softmax(teacher_logits, dim=-1))
    # print(kd_loss)
| 2.34375 | 2 |
tests/terraform/checks/resource/aws/test_LBDeletionProtection.py | kylelaker/checkov | 3 | 12791399 | <gh_stars>1-10
import unittest
import hcl2
from checkov.terraform.checks.resource.aws.LBDeletionProtection import check
from checkov.common.models.enums import CheckResult
class TestLBDeletionProtection(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "aws_lb" "test_failed" {
name = "test-lb-tf"
internal = false
load_balancer_type = "network"
subnets = aws_subnet.public.*.id
enable_deletion_protection = false
}
""")
resource_conf = hcl_res['resource'][0]['aws_lb']['test_failed']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_missing_attribute(self):
hcl_res = hcl2.loads("""
resource "aws_lb" "test_failed" {
name = "test-lb-tf"
internal = false
load_balancer_type = "network"
subnets = aws_subnet.public.*.id
}
""")
resource_conf = hcl_res['resource'][0]['aws_lb']['test_failed']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "aws_lb" "test_success" {
name = "test-lb-tf"
internal = false
load_balancer_type = "network"
subnets = aws_subnet.public.*.id
enable_deletion_protection = true
}
""")
resource_conf = hcl_res['resource'][0]['aws_lb']['test_success']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 2.234375 | 2 |
filmfestival/migrations/0031_film_stills.py | mykonosbiennale/mykonosbiennale.github.io | 1 | 12791400 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('material', '0002_auto_20170327_0215'),
('filmfestival', '0030_reward'),
]
operations = [
migrations.AddField(
model_name='film',
name='stills',
field=models.ForeignKey(blank=True, to='material.Album', null=True),
),
]
| 1.523438 | 2 |
lib/googlecloudsdk/sql/tools/ssl_certs/__init__.py | IsaacHuang/google-cloud-sdk | 0 | 12791401 | # Copyright 2013 Google Inc. All Rights Reserved.
"""Provide commands for managing SSL certificates of Cloud SQL instances."""
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
class SslCerts(base.Group):
"""Provide commands for managing SSL certificates of Cloud SQL instances.
Provide commands for managing SSL certificates of Cloud SQL instances,
including creating, deleting, listing, and getting information about
certificates.
"""
@staticmethod
def Args(parser):
parser.add_argument(
'--instance',
'-i',
help='Cloud SQL instance ID.')
def Filter(self, tool_context, args):
if not args.instance:
raise exceptions.ToolException('argument --instance/-i is required')
| 2.25 | 2 |
layersclick/efs.py | hdknr/py-layers | 0 | 12791402 | <reponame>hdknr/py-layers
import click
from layerslib import efs as EFS
from .utils import J, setup
@click.group()
@click.option("--profile_name", "-p", default=None)
@click.pass_context
def efs(ctx, profile_name):
setup(ctx, profile_name)
@efs.command()
@click.pass_context
def efs_list(ctx):
data = EFS.get_filesystem()
click.echo(J(data))
| 1.921875 | 2 |
tests/unit/test_get_data.py | shivaq/set_aws_mfa | 0 | 12791403 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from random import randint
from set_aws_mfa.data.data_manager import ProfileTuple
from set_aws_mfa.helper import helper
from set_aws_mfa import validate
from set_aws_mfa.data import data_manager
from set_aws_mfa.helper.helper import IntObject
from set_aws_mfa import prompts
from tests.conftest import BUILTIN_INPUTS
########################
# Get profiles
########################
# 1. Get a list of the profiles present in both config and credentials, merged with the credentials values
def test_get_perfect_profile_list(profile_obj_list, credentials_lists, perfect_profile_list):
"""テスト: 取得したリストは、Credential にも Config にも存在する プロファイルのリストかどうか"""
# GIVEN: Profile に Credentials の値も合わせた ProfileTuple のリストを取得する
profile_name_list = []
credentials_name_list = []
for i in profile_obj_list:
# Given: ProfileTuple の name だけを抽出する
profile_name_list.append(i.name)
for k in credentials_lists:
# GIVEN: CredentialsTuple の name だけを抽出する
credentials_name_list.append(k.name)
for in_both in perfect_profile_list:
assert isinstance(in_both, ProfileTuple)
        # WHEN: the ProfileTuple has an aws_secret_access_key set
if in_both.aws_secret_access_key is not None:
            # THEN: the profile exists in both credentials and config
assert in_both.name in credentials_name_list
assert in_both.name in profile_name_list
def test_prompt_displays_profile_name(capsys, perfect_profile_list):
"""テスト:プロファイルの選択肢が表示されるかどうか"""
# GIVEN: get perfect_profile_list
# WHEN: execute prompt_user_selection()
prompts.prompt_user_selection(perfect_profile_list)
out, err = capsys.readouterr()
# THEN: prompt usable profile name
for p in perfect_profile_list:
if p.aws_secret_access_key is not None:
# ") profile_name" is included in stdout
assert ") " + p.name in out.strip()
def test_get_selected_profile(perfect_profile_list, monkeypatch):
# GIVEN: perfect profile list
# GIVEN: Mock user input
user_input = 2
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: user_input)
# WHEN: this function is called
profile = data_manager.get_selected_profile()
assert profile == perfect_profile_list[user_input - 1]
########################
# Get aws account info
########################
# Test: return False when ~/.aws_accounts_for_set_aws_mfa does not exist
def test_no_aws_accounts_for_set_aws_mfa_returns_false(set_fake_aws_account_files):
# GIVEN: the path of AWS_ACCOUNT_FOR_SET_AWS_MFA replaced with fake path
# WHEN: Check the existence of AWS_ACCOUNT_FOR_SET_AWS_MFA
is_the_file_exists = validate.check_aws_accounts_for_set_aws_mfa_existence()
# THEN: The file is not exist
assert not is_the_file_exists
# Test: create ~/.aws_accounts_for_set_aws_mfa when it does not exist
def test_create_aws_accounts_for_set_aws_mfa(set_fake_aws_account_files, delete_fake_aws_account_files):
# GIVEN: the path of AWS_ACCOUNT_FOR_SET_AWS_MFA replaced with fake path
# GIVEN: the path of AWS_ACCOUNT_FOR_SET_AWS_MFA is not exist
# WHEN: Try to prepare AWS_ACCOUNT_FOR_SET_AWS_MFA and it is created
data_manager.prepare_aws_account_id_file()
# WHEN: Check the existence of AWS_ACCOUNT_FOR_SET_AWS_MFA
is_the_file_exists = validate.check_aws_accounts_for_set_aws_mfa_existence()
# THEN: The file is exist
assert is_the_file_exists
# Test: after ~/.aws_accounts_for_set_aws_mfa is created, ask the user to input the AWS account ID for the profile
def test_when_no_aws_account_file_asks_for_user_input(set_fake_aws_account_files, delete_fake_aws_account_files,
perfect_profile_list, capsys):
# GIVEN a Profile
profile = perfect_profile_list[0]
# WHEN create a new aws account file
if not validate.check_aws_accounts_for_set_aws_mfa_existence():
data_manager.create_aws_account_id_file()
else:
        # If the file already exists, raise without writing
raise
# THEN: ask to input aws account id for the profile
prompts.prompt_for_asking_aws_account_id(profile)
out, err = capsys.readouterr()
assert profile.name in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_BEFORE in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_AFTER in out.rstrip()
# Get the AWS account ID for the given profile from ~/.aws_accounts_for_set_aws_mfa
def test_get_aws_account_id_for_the_profile(perfect_profile_list):
"""注意: ~/.aws_accounts_for_set_aws_mfa がローカルにない場合、
テスト対象のツール使用時には該当ファイルがない場合は生成、入力がなされるが、
上記生成を行う前にこのテストは実施した際はテストに失敗する
"""
# GIVEN: a ProfileTuple
profile = perfect_profile_list[0]
# WHEN: call the function
aws_account_id = data_manager.get_aws_account_id(profile)
# THEN:
assert type(aws_account_id) == int
# Test: False is returned when the user-input AWS account ID is not an int
def test_user_input_is_not_int(monkeypatch):
    # GIVEN: mock the case where the user input is not an integer
user_input_not_int = "hogehoge"
# GIVEN: Mock user input string
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: user_input_not_int)
# WHEN: Validate the input
is_int = helper.is_input_int_loop(IntObject(), data_manager.ASKING_AWS_ACCOUNT_ID_INPUT_MESSAGE)
# THEN: It's not an int
assert not is_int
# Test: True is returned when the user-input AWS account ID is an int
def test_user_input_is_int(monkeypatch):
    # GIVEN: mock the case where the user input is an integer
user_input_not_int = "12345"
# GIVEN: Mock user input string
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: user_input_not_int)
# WHEN: Validate the input
is_int = helper.is_input_int_loop(IntObject(), data_manager.ASKING_AWS_ACCOUNT_ID_INPUT_MESSAGE)
# THEN: It's not an int
assert is_int
# Write the user-input AWS account ID to ~/.aws_accounts_for_set_aws_mfa
def test_writing_aws_account_to_the_file(set_fake_aws_account_files, delete_fake_aws_account_files, perfect_profile_list):
# GIVEN: AWS_ACCOUNT_FOR_SET_AWS_MFA is changed to fake path
# GIVEN: Create fake AWS_ACCOUNT_FOR_SET_AWS_MFA
data_manager.create_aws_account_id_file()
    # GIVEN: specify the target profile
profile = perfect_profile_list[0]
    # GIVEN: assume the following aws account id was obtained
aws_account_id = 12345
data_manager.create_aws_account_id_file()
# WHEN: check the existence of info for the given profile
data_manager.writing_aws_account_to_the_file(profile, aws_account_id)
    # WHEN: look up the aws account id for the profile in AWS_ACCOUNT_FOR_SET_AWS_MFA
retrieved_aws_account_id = data_manager.get_aws_account_id(profile)
    # THEN: an int aws account id has been retrieved
assert type(retrieved_aws_account_id) is int
# Test: ~/.aws_accounts_for_set_aws_mfa exists but has no AWS account ID for the profile, so the user is asked for input
def test_no_aws_account_id_for_given_profile_prompts_msg(set_fake_aws_account_files,
perfect_profile_list, create_fake_aws_account_files,
delete_fake_aws_account_files,
capsys, monkeypatch):
    # GIVEN: create a fake AWS_ACCOUNT_FOR_SET_AWS_MFA
# GIVEN: No info for profile exists in fake AWS_ACCOUNT_FOR_SET_AWS_MFA
    # GIVEN: specify the target profile
profile = perfect_profile_list[0]
    # GIVEN: mock an integer-like user input
aws_account_id_int = "12345"
# GIVEN: Mock user input string
monkeypatch.setattr(BUILTIN_INPUTS, lambda _: aws_account_id_int)
# WHEN: check the existence of info for the given profile
data_manager.get_aws_account_id(profile)
# THEN: Prompt message to ask for input aws account id for the profile
out, err = capsys.readouterr()
print(out.rstrip())
assert profile.name in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_BEFORE in out.rstrip()
assert prompts.PROMPT_ASK_AWS_ACCOUNT_ID_FOR_PROFILE_AFTER in out.rstrip()
# Test: get the MFA ARN for the given profile
def test_get_mfa_arn(perfect_profile_list):
# GIVEN: a ProfileTuple
profile = perfect_profile_list[0]
# WHEN: call the function
mfa_arn = data_manager.get_mfa_arn(profile)
# THEN:
assert data_manager.AWS_IAM_ARN_HEAD_PART
assert data_manager.AWS_IAM_ARN_MFA_PART
assert profile.name in mfa_arn
def test_get_role_for_a_base_profile(profile_which_has_role, profile_obj_list):
"""該当プロフィールと紐づくロールを返す"""
# GIVEN: a valid profile which can switch role
# WHEN: Check a role related to a given profile
role_for_the_profile_list = data_manager.get_role_list_for_a_profile(profile_which_has_role, profile_obj_list)
# THEN: there is some roles related to the profile
if len(role_for_the_profile_list) != 0:
assert role_for_the_profile_list[0].source_profile == profile_which_has_role.name
def test_get_profile_instance_for_user_input(perfect_profile_list):
# GIVEN: validated input num
validated_input = randint(1, len(perfect_profile_list))
# WHEN: get profile instance for the input number
profile_instance = data_manager.get_specified_profile(
perfect_profile_list, validated_input)
# THEN:
assert isinstance(profile_instance, ProfileTuple)
| 2.453125 | 2 |
src/userlogs/admin.py | cbsBiram/xarala__ssr | 0 | 12791404 | from django.contrib import admin
from .models import UserLog
admin.site.register(UserLog)
| 1.226563 | 1 |
dl4nlp_pos_tagging/models/modules/seq2seq_encoders/bi_feedforward_encoder.py | michaeljneely/model-uncertainty-pos-tagging | 1 | 12791405 | <reponame>michaeljneely/model-uncertainty-pos-tagging<filename>dl4nlp_pos_tagging/models/modules/seq2seq_encoders/bi_feedforward_encoder.py
from overrides import overrides
from allennlp.modules.seq2seq_encoders.feedforward_encoder import FeedForwardEncoder
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
@Seq2SeqEncoder.register("bi-feedforward")
class BiFeedForwardEncoder(FeedForwardEncoder):
@overrides
def is_bidirectional(self) -> bool:
return True
| 2.03125 | 2 |
proc_1040.py | ketancmaheshwari/Tributum | 0 | 12791406 | <gh_stars>0
#!/usr/bin/env python3
"""
Programs for processing form 1040
"""
import toml
def deps(dict_1040):
"""
A function to calculate no. of dependents.
This currently goes up to 4 dependents.
"""
dep_count = 0
counter = 1
while counter <= 4 and dict_1040["Dep" + str(counter)]["FN_LN"] != "":
counter += 1
dep_count += 1
return dep_count
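# For reference, a minimal sketch of the TOML layout this script assumes
# (field names inferred from the lookups in this file; values are placeholders):
#
#   [Dep1]
#   FN_LN = "Jane Doe"
#
#   [Main]
#   i2a = 0
#   i3a = 0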
def proc_sched_B(dict_sched_B):
print(dict_sched_B["Part3_Foreign_Accounts_Trusts"]["i7a"])
def start():
"""
This is the main function.
"""
d_1040 = toml.load("f1040.case1.toml")
#print(d_1040)
#print(d_1040["Dependents"]["Dep1"])
#print(d_1040["Dep1"]["FN_LN"])
#print(d_1040["Address"]["Street"])
print(deps(d_1040))
if d_1040["Main"]["i2a"] > 0 or d_1040["Main"]["i3a"] > 0:
d_sched_B = toml.load("sched_B.case1.toml")
proc_sched_B(d_sched_B)
if __name__ == "__main__":
start()
| 2.984375 | 3 |
descarteslabs/common/graft/interpreter/__init__.py | descarteslabs/descarteslabs-python | 167 | 12791407 | <gh_stars>100-1000
from .interpreter import interpret
from . import exceptions
from .scopedchainmap import ScopedChainMap
__all__ = ["interpret", "exceptions", "ScopedChainMap"]
| 1.179688 | 1 |
skyline/functions/database/queries/related_to_metric_groups.py | datastreaming/skyline-1 | 396 | 12791408 | <reponame>datastreaming/skyline-1
"""
Get the metric groups related to a metric id
"""
import logging
import traceback
from ast import literal_eval
from sqlalchemy.sql import select
from database import get_engine, engine_disposal, metric_group_table_meta
from functions.metrics.get_base_name_from_metric_id import get_base_name_from_metric_id
def related_to_metric_groups(current_skyline_app, base_name, metric_id):
"""
Returns a dict of all the metric_groups that a metric is part of.
"""
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
related_to_metric_groups_dict = {}
related_to_metric_groups_dict['metric'] = base_name
related_to_metric_groups_dict['metric_id'] = metric_id
related_to_metric_groups_dict['related_to_metrics'] = {}
try:
engine, fail_msg, trace = get_engine(current_skyline_app)
if fail_msg != 'got MySQL engine':
current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine fail_msg - %s' % str(fail_msg))
if trace != 'none':
current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine trace - %s' % str(trace))
except Exception as err:
current_logger.error(traceback.format_exc())
current_logger.error('error :: related_to_metric_groups :: could not get a MySQL engine - %s' % str(err))
if engine:
try:
metric_group_table, fail_msg, trace = metric_group_table_meta(current_skyline_app, engine)
if fail_msg != 'metric_group meta reflected OK':
current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta fail_msg - %s' % str(fail_msg))
if trace != 'none':
current_logger.error('error :: related_to_metric_groups :: could not get metric_group_table_meta trace - %s' % str(trace))
except Exception as err:
current_logger.error(traceback.format_exc())
current_logger.error('error :: related_to_metric_groups :: metric_group_table_meta - %s' % str(err))
try:
connection = engine.connect()
if metric_id:
stmt = select([metric_group_table]).where(metric_group_table.c.related_metric_id == metric_id).order_by(metric_group_table.c.avg_coefficient.desc())
else:
stmt = select([metric_group_table])
results = connection.execute(stmt)
for row in results:
group_metric_id = row['metric_id']
group_base_name = None
try:
group_base_name = get_base_name_from_metric_id(current_skyline_app, group_metric_id)
except Exception as err:
current_logger.error('error :: related_to_metric_groups :: base_name_from_metric_id failed to determine base_name from metric_id: %s - %s' % (
str(group_metric_id), str(err)))
if group_base_name:
related_to_metric_groups_dict['related_to_metrics'][group_base_name] = dict(row)
connection.close()
except Exception as err:
current_logger.error(traceback.format_exc())
current_logger.error('error :: related_to_metric_groups :: failed to build metric_groups dict - %s' % str(err))
if engine:
engine_disposal(current_skyline_app, engine)
for related_metric in list(related_to_metric_groups_dict['related_to_metrics'].keys()):
for key in list(related_to_metric_groups_dict['related_to_metrics'][related_metric].keys()):
if 'decimal.Decimal' in str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])):
related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = float(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])
if 'datetime.datetime' in str(type(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])):
related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = str(related_to_metric_groups_dict['related_to_metrics'][related_metric][key])
if key == 'shifted_counts':
try:
shifted_counts_str = related_to_metric_groups_dict['related_to_metrics'][related_metric][key].decode('utf-8')
shifted_counts = literal_eval(shifted_counts_str)
except AttributeError:
shifted_counts = related_to_metric_groups_dict['related_to_metrics'][related_metric][key]
related_to_metric_groups_dict['related_to_metrics'][related_metric][key] = shifted_counts
# Remap the metric_id and related_metric_id for clarity
related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_to_metric_id'] = related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id']
related_to_metric_groups_dict['related_to_metrics'][related_metric]['metric_id'] = metric_id
del related_to_metric_groups_dict['related_to_metrics'][related_metric]['related_metric_id']
return related_to_metric_groups_dict
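# Hypothetical usage sketch (app and metric names are placeholders):
# groups = related_to_metric_groups('analyzer', 'stats.host1.cpu.user', 123)
# for related_metric, row in groups['related_to_metrics'].items():
#     print(related_metric, row.get('avg_coefficient'))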
| 2.359375 | 2 |
mwbase/admin.py | uw-ictd/mwbase | 1 | 12791409 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.http.response import HttpResponse
from django.http import JsonResponse
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.urls import path, reverse
from django.utils import html
from openpyxl.writer.excel import save_virtual_workbook
import utils.admin as utils
# Local Imports
from mwbase import models as mwbase
from mwbase.forms import ImportXLSXForm
from mwbase.utils import sms_bank
import swapper
AutomatedMessage = swapper.load_model("mwbase", "AutomatedMessage")
Participant = swapper.load_model("mwbase", "Participant")
StatusChange = swapper.load_model("mwbase", "StatusChange")
class ConnectionInline(admin.TabularInline):
model = mwbase.Connection
extra = 0
class NoteInline(admin.TabularInline):
model = mwbase.Note
extra = 1
def mark_quit(modeladmin, request, queryset):
    ''' mark all participants in queryset as quit and save '''
for c in queryset:
c.set_status('quit', comment='Status set from bulk quit action')
mark_quit.short_description = 'Mark participant as quit'
def revert_status(modeladmin, request, queryset):
''' set the status for each participant in queryset to their previous status '''
for c in queryset:
old_status = c.statuschange_set.last().old
c.set_status(old_status, comment='Status reverted from bulk action')
revert_status.short_description = 'Revert to last status'
@admin.register(Participant)
class ParticipantAdmin(admin.ModelAdmin):
list_display = ('study_id', 'display_name', 'preg_status', 'sms_status', 'description', 'facility', 'phone_number', 'due_date', 'language', 'send_day', 'is_validated', 'created')
list_display_links = ('study_id', 'display_name')
list_filter = ('facility', 'study_group', ('created', admin.DateFieldListFilter), 'preg_status', 'is_validated', 'language', 'send_day')
ordering = ('study_id',)
search_fields = ('study_id', 'display_name', 'connection__identity', 'anc_num')
readonly_fields = ('last_msg_client', 'last_msg_system', 'created', 'modified')
inlines = (ConnectionInline, NoteInline)
actions = (mark_quit, revert_status,)
class ParticipantAdminMixin(object):
participant_field = 'participant'
def participant_name(self, obj):
participant = getattr(obj, self.participant_field)
if participant is not None:
return html.format_html(
"<a href='../participant/{0.pk}'>({0.study_id}) {0.display_name}</a>".format(participant))
participant_name.short_description = 'SMS Name'
participant_name.admin_order_field = '{}__study_id'.format(participant_field)
def facility(self, obj):
participant = getattr(obj, self.participant_field)
if participant is not None:
return participant.facility.capitalize()
facility.admin_order_field = '{}__facility'.format(participant_field)
def study_id(self, obj):
return getattr(obj, self.participant_field).study_id
study_id.short_description = 'Study ID'
study_id.admin_order_field = '{}__study_id'.format(participant_field)
def phone_number(self, obj):
connection = getattr(obj, self.participant_field).connection()
if connection is not None:
return html.format_html("<a href='../connection/{0.pk}'>{0.identity}</a>".format(connection))
phone_number.short_description = 'Number'
phone_number.admin_order_field = '{}__connection__identity'.format(participant_field)
@admin.register(mwbase.Message)
class MessageAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('text', 'participant_name', 'identity', 'is_system', 'is_outgoing', 'is_reply', 'external_status', 'translation_status', 'created')
list_filter = ('is_system', 'is_outgoing', 'external_status', ('participant', utils.NullFieldListFilter), ('created', admin.DateFieldListFilter), 'connection__participant__facility', 'translation_status', 'is_related', 'external_success')
date_hierarchy = 'created'
search_fields = ('participant__study_id', 'participant__display_name', 'connection__identity')
readonly_fields = ('created', 'modified')
def identity(self, obj):
return html.format_html("<a href='./?q={0.identity}'>{0.identity}</a>".format(obj.connection))
identity.short_description = 'Number'
identity.admin_order_field = 'connection__identity'
@admin.register(mwbase.PhoneCall)
class PhoneCallAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('comment', 'participant_name', 'phone_number', 'outcome', 'is_outgoing', 'created')
date_hierarchy = 'created'
list_filter = ('outcome', 'is_outgoing')
readonly_fields = ('created', 'modified')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.Note)
class NoteAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('participant_name', 'comment', 'created')
date_hierarchy = 'created'
@admin.register(mwbase.Connection)
class ConnectionAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('identity', 'participant_name', 'facility', 'is_primary')
search_fields = ('participant__study_id', 'participant__display_name', 'identity')
@admin.register(mwbase.Visit)
class VisitAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('study_id', 'participant_name', 'visit_type', 'scheduled',
'notification_last_seen', 'notify_count', 'arrived', 'status')
date_hierarchy = 'scheduled'
list_filter = ('status', 'visit_type', 'arrived', 'scheduled')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.ScheduledPhoneCall)
class ScheduledPhoneCall(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('study_id', 'participant_name', 'call_type', 'scheduled',
'notification_last_seen', 'notify_count', 'arrived', 'status')
date_hierarchy = 'scheduled'
list_filter = ('status', 'call_type', 'arrived', 'scheduled')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.Practitioner)
class PractitionerAdmin(admin.ModelAdmin):
list_display = ('facility', 'username', 'password_changed')
@admin.register(StatusChange)
class StatusChangeAdmin(admin.ModelAdmin, ParticipantAdminMixin):
list_display = ('comment', 'participant_name', 'old', 'new', 'type', 'created')
search_fields = ('participant__study_id', 'participant__display_name')
@admin.register(mwbase.EventLog)
class EventLogAdmin(admin.ModelAdmin):
list_display = ('user', 'event', 'created')
class PractitionerInline(admin.TabularInline):
model = mwbase.Practitioner
class UserAdmin(UserAdmin):
inlines = (PractitionerInline,)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(AutomatedMessage)
class AutomatedMessageAdmin(admin.ModelAdmin):
list_display = ('description', 'english')
list_filter = ('send_base', 'condition', 'group')
change_list_template = "admin/mwbase/automatedmessage/change_list.html"
smsbank_check_template = "admin/mwbase/automatedmessage/sms_bank_check.html"
smsbank_import_template = "admin/mwbase/automatedmessage/sms_bank_import.html"
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
extra_context['form'] = ImportXLSXForm
return super(AutomatedMessageAdmin, self).changelist_view(request, extra_context=extra_context)
def get_urls(self):
urls = super().get_urls()
my_urls = [
path(r'smsbank_check_view/', self.admin_site.admin_view(self.smsbank_check_view), name='smsbank_check_view'),
path(r'smsbank_import_view/', self.admin_site.admin_view(self.smsbank_import_view), name='smsbank_import_view'),
path(r'smsbank_create_xlsx/', self.admin_site.admin_view(self.smsbank_create_xlsx), name='smsbank_create_xlsx')
]
urls = my_urls + urls
return urls
def smsbank_create_xlsx(self, request, extra_context=None):
wb = sms_bank.create_xlsx()
response = HttpResponse(save_virtual_workbook(wb), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename="smsbank.xlsx"'
return response
def smsbank_import_view(self, request, extra_context=None):
opts = self.model._meta
app_label = opts.app_label
form = ImportXLSXForm(request.POST or None, request.FILES or None)
        counts, existing, diff = [], [], []
error = ""
if request.method == 'POST':
if form.is_valid():
file = form.cleaned_data.get("file")
# try:
                counts, existing, diff = sms_bank.import_messages(file)
# except Exception as e:
# print(e)
# error = "There was an error importing the given file. Please try again."
context = {
**self.admin_site.each_context(request),
'module_name': str(opts.verbose_name_plural),
'opts': opts,
'counts': counts,
'existing': existing,
'diff': diff,
'error': error,
**(extra_context or {}),
}
return TemplateResponse(request, self.smsbank_import_template or [
'admin/%s/%s/sms_bank_import.html' % (app_label, opts.model_name),
'admin/%s/sms_bank_import.html' % app_label,
'admin/sms_bank_import.html'
], context)
def smsbank_check_view(self, request, extra_context=None):
opts = self.model._meta
app_label = opts.app_label
items = duplicates = descriptions = total = None
form = ImportXLSXForm(request.POST or None, request.FILES or None)
if request.method == 'POST':
if form.is_valid():
file = form.cleaned_data.get("file")
                (items, duplicates, descriptions, total, errors) = sms_bank.check_messages(file)
url = reverse('admin:smsbank_import_view')
response = JsonResponse({
'url': url,
'duplicates': duplicates,
'errors': errors,
'total': total,
'success': True,
})
return response
else:
return JsonResponse({'success': False, 'message': 'Form Invalid',})
else:
return JsonResponse({'success': False, 'message': 'Invalid method',})
| 1.992188 | 2 |
scripts/iemre/db_to_netcdf.py | jamayfieldjr/iem | 1 | 12791410 | <gh_stars>1-10
"""Copy database grids to netcdf.
Example: python db_to_netcdf.py <year> <month> <day> [<utchour> <utcminute>]
If hour and minute are omitted, this is a daily copy, otherwise hourly.
see: akrherz/iem#199
"""
import sys
import datetime
import numpy as np
from pyiem.util import utc, ncopen, logger
from pyiem import iemre
def main(argv):
"""Go Main Go."""
log = logger()
if len(argv) == 6:
valid = utc(
int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
ncfn = iemre.get_hourly_ncname(valid.year)
idx = iemre.hourly_offset(valid)
else:
valid = datetime.date(int(argv[1]), int(argv[2]), int(argv[3]))
ncfn = iemre.get_daily_ncname(valid.year)
idx = iemre.daily_offset(valid)
ds = iemre.get_grids(valid)
with ncopen(ncfn, 'a', timeout=600) as nc:
for vname in ds:
if vname not in nc.variables:
continue
log.debug("copying database var %s to netcdf", vname)
# Careful here, ds could contain NaN values
nc.variables[vname][idx, :, :] = np.ma.array(
ds[vname].values, mask=np.isnan(ds[vname].values)
)
if __name__ == '__main__':
main(sys.argv)
| 2.90625 | 3 |
meshes/read_brain_mesh_3D.py | AasmundResell/FEniCS-Brain-Flow | 0 | 12791411 | from fenics import *
from matplotlib.pyplot import show
def read_brain_mesh_3D():
path = "/home/asmund/dev/FEniCS-Brain-Flow/meshes/parenchyma16_with_DTI.h5"
mesh = Mesh()
#hdf = HDF5File(mesh.mpi_comm(),path , "r")
#hdf.read(mesh, "/mesh", False)
SD = MeshFunction("size_t", mesh,mesh.topology().dim())
#hdf.read(SD, "/subdomains")
bnd = MeshFunction("size_t", mesh,mesh.topology().dim()-1)
#hdf.read(bnd, "/boundaries")
#lookup_table = MeshFunction("size_t", mesh, mesh.topology().dim())
#hdf.read(lookup_table, '/lookup_table')
#TensorSpace = TensorFunctionSpace(mesh, 'DG', 0)
#MDSpace = FunctionSpace(mesh, 'DG', 0)
#MD = Function(MDSpace)
#Kt = Function(TensorSpace)
#hdf.read(MD, '/MD')
#hdf.read(Kt, '/DTI')
#File('subdomains.pvd')<<SD
#File('bnd.pvd')<<bnd
return mesh,SD,bnd
def read_brain_scale(mesh):
dx = Measure("dx", domain=mesh)
tot_parenchyma_vol = assemble(1*dx)
vol_scale = 1.0/tot_parenchyma_vol
print("Volume of parenchyma in mm³: ",tot_parenchyma_vol)
return vol_scale
if __name__ == "__main__":
    mesh, SD, bnd = read_brain_mesh_3D()
scale = read_brain_scale(mesh)
| 2.34375 | 2 |
djangosheet/migrations/0004_lineupentry_ordering.py | seandw/djangosheet | 2 | 12791412 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('djangosheet', '0003_lineup_1to1'),
]
operations = [
migrations.AlterModelOptions(
name='lineupentry',
options={'verbose_name_plural': 'lineup entries', 'ordering': ['batting_position']},
),
]
| 1.554688 | 2 |
lib/bnet/cache.py | theonewolf/siegvswolf | 0 | 12791413 | import logging
from .util import raw_get
from google.appengine.api.taskqueue import TaskAlreadyExistsError
from google.appengine.api.taskqueue import TombstonedTaskError
from google.appengine.ext import ndb
from google.appengine.ext import deferred
from datetime import datetime
from time import time
CACHE_TIMEOUT = 30
def update_cache(self, endpoint, **kwargs):
data = raw_get(self, endpoint, **kwargs)
key = ndb.Key(CachedResponse, endpoint)
cr = key.get()
cr.data = data
cr.put()
def cached(timeout=CACHE_TIMEOUT):
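    """Decorator factory: serve a method's JSON response from the datastore
    cache and refresh it via a deferred task once `timeout` seconds pass."""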
def func_wrapper(func):
def cached_check(self, endpoint, **kwargs):
key = ndb.Key(CachedResponse, endpoint)
cr = key.get()
if not cr:
data = func(self, endpoint, **kwargs)
cr = CachedResponse(key=key,
endpoint=endpoint,
data=data)
cr.put()
else:
oldtime = cr.timestamp
ts = time()
currtime = datetime.utcfromtimestamp(ts)
td = currtime - oldtime
                if td.total_seconds() > timeout:
try:
task_name = endpoint.replace('/', '-') + \
'-%d' % (int(ts))
deferred.defer(update_cache, self, endpoint,
_name=task_name, **kwargs)
except TaskAlreadyExistsError:
logging.critical('Task <%s> already exists.' %
task_name)
logging.critical('Could not update cache.')
except TombstonedTaskError:
logging.critical('Tombstoned task <%s> encountered.' %
task_name)
logging.critical('Attempting to serve old cache data.')
logging.critical('Stored timestamp was: %s' %
str(cr.timestamp))
logging.critical('Current time is: %s' % str(currtime))
return cr.data
return cached_check
return func_wrapper
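# A minimal usage sketch (hypothetical client class, not part of this module):
#
#     class Client:
#         @cached(timeout=60)
#         def get(self, endpoint, **kwargs):
#             return raw_get(self, endpoint, **kwargs)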
class CachedResponse(ndb.Model):
endpoint = ndb.StringProperty('e', required=True, indexed=True)
data = ndb.JsonProperty('d', required=True)
timestamp = ndb.DateTimeProperty('t', auto_now=True)
| 2.09375 | 2 |
ex08-BibliotecaMath.py | vnnstar/Python-Mundo1-CursoEmVideo | 0 | 12791414 | <reponame>vnnstar/Python-Mundo1-CursoEmVideo<filename>ex08-BibliotecaMath.py
import math
n = int(input('Informe um valor inteiro: '))
print('Este é o número digitado {}, este é seu antecessor {}, este é seu'
' sucessor {}.'.format(n, n - 1, n + 1))
n1 = int(input('Informe um valor inteiro: '))
raiz = math.sqrt(n1)  # can also be written as raiz = n1 ** (1/2)
print('O dobro é {}, o triplo é {}, a raiz quadrada é {}.'
.format(n1 * 2, n1 * 3, raiz))
n2 = float(input('Informe uma nota de 0 a 10: '))
n3 = float(input('Informe uma segunda nota de 0 a 10: '))
media = ((n2 + n3) / 2)
print('A sua média é {:.2f}'.format(media))
metro = float(input('Digite um valor em metros para ser convertido: '))
centimetro = metro * 100
milimetro = metro * 1000
print('Este valor de {} metros, é igual a {:.0f} centimetros e {:.0f}'
'milimetros.'.format(metro, centimetro, milimetro))
tabuada = int(input('Informe um número para apresentar sua tabuada'))
contador = 0
for count in range(1, 10 + 1):
print('{} x {} = {}'. format(tabuada, count, (tabuada * count)))
# ANOTHER WAY TO DO THE MULTIPLICATION TABLE: count plays the same role as x,
# i.e. it acts as the counter a WHILE version would use
| 3.5625 | 4 |
vindauga/widgets/color_item_list.py | gabbpuy/vindauga | 5 | 12791415 | <reponame>gabbpuy/vindauga<gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
from vindauga.constants.colors import cmSaveColorIndex, cmNewColorIndex, cmNewColorItem
from vindauga.constants.event_codes import evBroadcast
from vindauga.misc.message import message
from .list_viewer import ListViewer
logger = logging.getLogger(__name__)
class ColorItemList(ListViewer):
"""
The interrelated classes `ColorItem`, `ColorGroup`, `ColorSelector`,
`MonoSelector`, `ColorDisplay`, `ColorGroupList`, `ColorItemList` and
`ColorDialog` provide viewers and dialog boxes from which the user can
select and change the color assignments from available palettes with
immediate effect on the screen.
`ColorItemList` is a simpler variant of `ColorGroupList` for viewing and
selecting single color items rather than groups of colors.
Like `ColorGroupList`, `ColorItemList` is specialized derivative of
`ListViewer`. Color items can be selected in any of the usual ways (by
mouse or keyboard).
Unlike `ColorGroupList`, `ColorItemList` overrides the `ListViewer`
event handler.
"""
name = 'ColorItemList'
def __init__(self, bounds, scrollBar, items):
super().__init__(bounds, 1, 0, scrollBar)
self._items = items
self.eventMask |= evBroadcast
self.setRange(len(items))
def focusItem(self, item):
"""
Selects the given item by calling `super().focusItem(item)`, then
        broadcasts `cmSaveColorIndex` and `cmNewColorIndex` events.
:param item: Item number to focus
"""
super().focusItem(item)
message(self.owner, evBroadcast, cmSaveColorIndex, item)
curItem = self._items[item]
message(self.owner, evBroadcast, cmNewColorIndex, curItem.index)
def getText(self, item, maxChars):
curItem = self._items[item]
return curItem.name[:maxChars]
def handleEvent(self, event):
super().handleEvent(event)
if event.what == evBroadcast:
g = event.message.infoPtr
command = event.message.command
if command == cmNewColorItem:
self._items = g.items
self.setRange(len(g.items))
self.focusItem(g.index)
self.drawView()
| 2.640625 | 3 |
geophys_utils/dataset_metadata_cache/__init__.py | GeoscienceAustralia/geophys_utils | 18 | 12791416 | '''
Created on 20 Jul. 2018
@author: Alex
'''
from ._dataset_metadata_cache import settings, DatasetMetadataCache, Dataset, Distribution
from ._postgres_dataset_metadata_cache import PostgresDatasetMetadataCache
from ._sqlite_dataset_metadata_cache import SQLiteDatasetMetadataCache
def get_dataset_metadata_cache(db_engine='SQLite', *args, **kwargs):
'''
Class factory function to return subclass of DatasetMetadataCache for specified db_engine
'''
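    # For example (a sketch; constructor arguments depend on the chosen subclass):
    #     cache = get_dataset_metadata_cache('SQLite')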
if db_engine == 'SQLite':
return SQLiteDatasetMetadataCache(*args, **kwargs)
elif db_engine == 'Postgres':
return PostgresDatasetMetadataCache(*args, **kwargs)
else:
        raise ValueError('Unhandled db_engine "{}"'.format(db_engine)) | 2.109375 | 2 |
mutJacobMethod.py | vicentepese/PSM-Narcolepsy | 1 | 12791417 | import numpy as np
import sys
import os
import json
import csv
import re
import random
import subprocess
from markdown2 import Markdown
from Bio import Entrez
from Bio import SeqIO
from collections import defaultdict, OrderedDict
from scipy import stats
from utils import getBindingCore, importBindData,\
importData, reference_retreive, div0, getBindingCore, getRandomColor
def statisticalTest(options, seqMut, vaccSample, refProt):
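    """
    Run Fisher's exact test per position/residue, comparing mutation counts
    in ARP and FOC samples against PAN; returns nested dicts holding the
    p-value and odds ratio of each comparison.
    """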
# Initialize
MUT_stats = defaultdict(lambda: defaultdict(lambda : defaultdict(lambda: defaultdict(int))))
# For each position
for pos in range(options['pos_range'][0], options['pos_range'][1]+1):
if pos in list(seqMut.keys()):
for ptm in list(seqMut[pos].keys()):
                if 'PAN' in seqMut[pos][ptm] and 'ARP' in seqMut[pos][ptm]:
# Create array
ptm_positive = [seqMut[pos][ptm]['ARP'], seqMut[pos][ptm]['PAN']]
ptm_negative = [vaccSample[pos]['ARP'] - seqMut[pos][ptm]['ARP'], \
vaccSample[pos]['PAN'] - seqMut[pos][ptm]['PAN']]
# Fisher test and append to output
oddsratio, pvalue = stats.fisher_exact([ptm_positive, ptm_negative])
MUT_stats[pos][ptm]['ARP']['pvalue'] = pvalue
MUT_stats[pos][ptm]['ARP']['oddsratio'] = oddsratio
                if 'PAN' in seqMut[pos][ptm] and 'FOC' in seqMut[pos][ptm]:
# Create array
ptm_positive = [seqMut[pos][ptm]['FOC'], seqMut[pos][ptm]['PAN']]
ptm_negative = [vaccSample[pos]['FOC'] - seqMut[pos][ptm]['FOC'], \
vaccSample[pos]['PAN'] - seqMut[pos][ptm]['PAN']]
# Fisher test and append to output
oddsratio, pvalue = stats.fisher_exact([ptm_positive, ptm_negative])
MUT_stats[pos][ptm]['FOC']['pvalue'] = pvalue
MUT_stats[pos][ptm]['FOC']['oddsratio'] = oddsratio
return MUT_stats
def mapMutations(data, refProt, options):
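    """
    Count amino-acid mismatches against the reference protein, keyed by
    position, residue and vaccine; also tally how many sequences from each
    vaccine cover every position.
    """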
# Initialize outputs
seqMUT = defaultdict(lambda: defaultdict(lambda : defaultdict(int)))
vaccSample = defaultdict(lambda: defaultdict((int)))
# For each sequence
for seq in data:
# Initialize: sequence with and without PTM, initial position
AAseq = seq[1][2:-2]
AAnonPTM = re.sub('\[.+?\]', '', AAseq)
init_pos = int(seq[2])
# Check for mutations
for AA, pos in zip(AAnonPTM, range(init_pos, init_pos + len(AAnonPTM))):
# Count instances
vaccSample[pos][seq[3]] += 1
# If there is a mutation append
            if AA != refProt[pos]:
seqMUT[pos][AA][seq[3]] += 1
# Filter positions where there is no samples from any of the
# vaccines
for pos in list(seqMUT.keys()):
for ptm in list(seqMUT[pos].keys()):
if not(seqMUT[pos][ptm]['ARP'] and seqMUT[pos][ptm]['PAN']) \
and not(seqMUT[pos][ptm]['FOC'] and seqMUT[pos][ptm]['PAN']):
del seqMUT[pos][ptm]
if len(seqMUT[pos]) < 1:
del seqMUT[pos]
return seqMUT, vaccSample
def map2HTML(options, coreIdxs, coreClass, refProt, MUT_stats, seqMut, vaccSample):
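    """
    Render the reference protein with highlighted binding cores, per-vaccine
    mutation tracks and Fisher-test annotations, writing the result to the
    HTML report file configured in the options.
    """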
# Initialize
PTM_HTML = list()
markdowner = Markdown()
color = getRandomColor(options)
refProt = ''.join([refProt[pos] for pos in refProt])
# In blocks of 70, while smaller than the length of the protein of reference
i = 0
while i < len(refProt):
# Create string of reference protein (taking 70 AA)
refProtStr = refProt[i:i+70]
count = 0
# For each binding core and class
for core, coreCl in zip(coreIdxs, coreClass):
# If initial position of the core overlaps with that fragment
if core[0] in range(i, i + 70):
# If no previous hightlight
if count == 0:
# Update core idxes, and highlight based on class
core = [idx -i for idx in core]
if coreCl == 'strong':
refProtStr = refProtStr[0:core[0]] + color['strongBinder'][0] + refProtStr[core[0]:core[1]] + \
color['strongBinder'][1] + refProtStr[core[1]:]
count += 1
else:
refProtStr = refProtStr[0:core[0]] + color['weakBinder'][0] + refProtStr[core[0]:core[1]] + \
color['weakBinder'][1] + refProtStr[core[1]:]
count += 1
# If previous binding core in segment, update idx and highlight based on class
else:
if coreCl == 'strong':
core = [idx - i + count*(len(color['strongBinder'][0]) + len(color['strongBinder'][1])) for idx in core]
refProtStr = refProtStr[0:core[0]] + color['strongBinder'][0] + refProtStr[core[0]:core[1]] + \
color['strongBinder'][1] + refProtStr[core[1]:]
count += 1
else:
core = [idx - i + count*(len(color['strongBinder'][0]) + len(color['strongBinder'][1])) for idx in core]
refProtStr = refProtStr[0:core[0]] + color['weakBinder'][0] + refProtStr[core[0]:core[1]] + \
color['weakBinder'][1] + refProtStr[core[1]:]
count += 1
# If ending position of the core overlaps with the fragment: same
elif core[1] in range(i, i + 70):
core = [idx -i for idx in core]
core = [0 if idx < 0 else idx for idx in core]
if coreCl == 'strong':
refProtStr = color['strongBinder'][0] + refProtStr[core[0]:core[1]] + \
color['strongBinder'][1] + refProtStr[core[1]:]
count += 1
else:
refProtStr = color['weakBinder'][0] + refProtStr[core[0]:core[1]] + \
color['weakBinder'][1] + refProtStr[core[1]:]
count += 1
# Append to HTML output
refProtStr = str(i+1) + '.' + ' '*(6 -len(str(i))-1) + refProtStr + '\n'
PTM_HTML.append(markdowner.convert(refProtStr))
# Create PAN string: same as ARP string
PAN_str = color['PAN'][0] + 'PAN: ' + color['PAN'][1]
last_pos = 0
for pos in range(i,i+70):
if pos in list(seqMut.keys()):
if any(seqMut[pos][mut]['PAN'] for mut in seqMut[pos]):
PAN_str = PAN_str + color['PAN'][0] + '—'*(pos - last_pos -1 - i) + color['PAN'][1] + refProt[pos-1]
last_pos = pos - i
PAN_str = PAN_str + color['PAN'][0] + '—'*(70 - last_pos) + color['PAN'][1]
PTM_HTML.append(markdowner.convert(PAN_str))
# Create ARP string, highlighting positions of PTMs, and append
ARP_str = color['ARP'][0] + 'ARP: ' + color['ARP'][1]
mut_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
last_pos = 0
for pos in range(i,i+70):
if pos in list(seqMut.keys()):
if any(seqMut[pos][mut]['ARP'] for mut in seqMut[pos]):
ARP_str = ARP_str + color['ARP'][0] + '—'*(pos - last_pos -1 - i) + color['ARP'][1] + refProt[pos-1]
for mut in seqMut[pos]:
mut_dict[pos][mut]['ARP'] = seqMut[pos][mut]['ARP']
last_pos = pos - i
ARP_str = ARP_str + color['ARP'][0] + '—'*(70 - last_pos) + color['ARP'][1]
PTM_HTML.append(markdowner.convert(ARP_str))
# Create FOC string, highlighting positions of PTMs, and append
FOC_str = color['FOC'][0] + 'FOC: ' + color['FOC'][1]
last_pos = 0
for pos in range(i,i+70):
if pos in list(seqMut.keys()):
if any(seqMut[pos][mut]['FOC'] for mut in seqMut[pos]):
FOC_str = FOC_str + color['FOC'][0] + '—'*(pos - last_pos -1 - i) + color['FOC'][1] + refProt[pos-1]
for mut in seqMut[pos]:
mut_dict[pos][mut]['FOC'] = seqMut[pos][mut]['FOC']
last_pos = pos - i
FOC_str = FOC_str + color['FOC'][0] + '—'*(70 - last_pos) + color['FOC'][1]
PTM_HTML.append(markdowner.convert(FOC_str))
# Create strings for each PTM positon and type
for pos in list(mut_dict.keys()):
for mut in list(mut_dict[pos].keys()):
for vacc in list(mut_dict[pos][mut].keys()):
if mut_dict[pos][mut][vacc] > 0:
vacc_prop = seqMut[pos][mut][vacc]/vaccSample[pos][vacc]
vacc_samp = vaccSample[pos][vacc]
PAN_prop = seqMut[pos][mut]['PAN']/vaccSample[pos]['PAN']
PAN_samp = vaccSample[pos]['PAN']
PAN_mut_str = ' '*(pos -i -3+ 6) + \
color['mut'][0] + mut + color['mut'][1] + \
'(' + vacc + ':{:.2%}({}),PAN:{:.2%}({}),'.format(vacc_prop, vacc_samp, PAN_prop, PAN_samp)
if pos in list(MUT_stats.keys()) and vacc in list(MUT_stats[pos][mut].keys()) \
and MUT_stats[pos][mut][vacc]['pvalue'] < 0.05:
                            PAN_mut_str = PAN_mut_str + color['red'][0] + 'p={:.2})'.format(MUT_stats[pos][mut][vacc]['pvalue']) + color['red'][1] + '\n'
elif pos in list(MUT_stats.keys()) and vacc in list(MUT_stats[pos][mut].keys()):
PAN_mut_str = PAN_mut_str + 'p={:.2})'.format(MUT_stats[pos][mut][vacc]['pvalue']) + '\n'
PTM_HTML.append(markdowner.convert(PAN_mut_str))
# Separate
PTM_HTML.append(markdowner.convert(' \n'))
# Update index
i += 70
# Print and save
with open(options['html']["scroll-template"], 'r') as inFile:
with open(options['files']['mutMapJacob.html'], 'w') as outFile:
for line in inFile:
outFile.write(line)
outFile.writelines(PTM_HTML)
def main():
# Read options
with open('options.json','r') as inFile:
options = json.load(inFile)
# Import data
data = importData(options)
# Import protein of reference
refProt = reference_retreive(options['refProt'])
# Get binding cores and binding core positions
coreIdxs, coreClass = getBindingCore(options, refProt)
# Map mutations
seqMut, vaccSample = mapMutations(data, refProt, options)
# Compute Fisher exact test
MUT_stats = statisticalTest(options, seqMut, vaccSample, refProt)
# Create HTML output
map2HTML(options, coreIdxs, coreClass, refProt, MUT_stats, seqMut, vaccSample)
if __name__ == "__main__":
main() | 2.125 | 2 |
orchestration/integration/custom_scripts/script_execution.py | dave-read/vdc | 1 | 12791418 | <reponame>dave-read/vdc
from orchestration.models.script_type import ScriptType
from orchestration.common import helper
class CustomScriptExecution(object):
def execute(
self,
script_type: ScriptType,
command: str,
output_file_path: str = None,
property_path: str = None,
file_path_to_update: str = None) -> dict:
if script_type == ScriptType.POWERSHELL:
from orchestration.integration.custom_scripts.powershell_execution import PowershellScriptExecution
pwsh = PowershellScriptExecution()
result = pwsh.execute(command)
elif script_type == ScriptType.BASH:
from orchestration.integration.custom_scripts.bash_execution import BashScriptExecution
bash = BashScriptExecution()
result = bash.execute(command)
else:
            raise ValueError('Invalid type received')
if output_file_path is not None and\
len(output_file_path) > 0:
self.save_json_file(
result,
output_file_path)
if property_path is not None and\
len(property_path) > 0 and\
file_path_to_update is not None and\
len(file_path_to_update) > 0:
self.modify_json_file(
result= result,
property_path= property_path,
file_path_to_update= file_path_to_update)
return result['output']
def save_json_file(
self,
result: dict,
output_file_path: str):
helper.save_json_file(
result['output'],
output_file_path)
def modify_json_file(
self,
result: dict,
property_path: str,
file_path_to_update: str):
helper.modify_json_file(
prop_value= result['output'],
prop_key= property_path,
path= file_path_to_update)
| 2.234375 | 2 |
Ver.1/Main.py | AbdAlazezAhmed/TilesPy | 0 | 12791419 | import numpy as np
from grabscreen import grab_screen
from directkeys import Up , Down , PressKey , ReleaseKey , Move1 , Move2
import time
from getkeys import key_check
import cv2
def main():
    while True:
        # Resize the game window to slightly less than a quarter of the screen at 1920x1080 (the 800x800 capture region)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
keys = key_check()
while screen[778,250] < 130 or screen[778,250] > 200 :
if screen[765,250] < 130 or screen[765,250] > 200 :
Move1(307,778)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 250] )
keys = key_check()
## time.sleep(0.1)
if 'X' in keys:
break
Move2(0,0)
while screen[778 , 360]<130 or screen[778 , 360]>200 :
if screen[765 , 360]<130 or screen[765 , 360]>200 :
Move1(420 , 778)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 360] )
## time.sleep(0.1)
keys = key_check()
if 'X' in keys:
break
Move2(0,0)
while screen [778 , 480]<130 or screen [778 , 480]>200 :
if screen [765 , 480]<130 or screen [765 , 480]>200 :
Move1(525 , 778)
## time.sleep(0.1)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 480] )
keys = key_check()
if 'X' in keys:
break
Move2(0,0)
while screen[778 , 590]<130 or screen[778 , 590]>200:
if screen[765 , 590]<130 or screen[765 , 590]>200:
Move1(620 , 778)
## time.sleep(0.1)
screen = cv2.cvtColor(grab_screen(region=(0,0,800,800)),cv2.COLOR_RGB2GRAY)
print(screen[778 , 600] )
keys = key_check()
if 'X' in keys:
break
Move2(0,0)
if 'X' in keys:
break
main()
| 2.734375 | 3 |
src/backend/graph/api/urls.py | pawlaczyk/KTZgraph | 0 | 12791420 | <filename>src/backend/graph/api/urls.py<gh_stars>0
from django.urls import path
from graph.api import views
urlpatterns = [
path('', views.GraphList.as_view(), name='graph-list'),
    path('<int:pk>', views.GraphDetail.as_view(), name='graph-detail'),
    path('create/', views.GraphCreate.as_view(), name='graph-create'),
path('get_graphs/', views.get_graphs, name='get_graphs'),
] | 1.851563 | 2 |
html_parsing/get_game_genres/parsers/squarefaction_ru.py | DazEB2/SimplePyScripts | 117 | 12791421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from typing import List
from bs4 import BeautifulSoup
from base_parser import BaseParser
class SquarefactionRu_Parser(BaseParser):
def _parse(self) -> List[str]:
url = f'http://squarefaction.ru/main/search/games?q={self.game_name}'
rs = self.send_get(url)
root = BeautifulSoup(rs.content, 'html.parser')
# http://squarefaction.ru/main/search/games?q=dead+space
if '/main/search/games' in rs.url:
self.log_info(f'Parsing of game list')
for game_block in root.select('#games > .entry'):
title = self.get_norm_text(game_block.select_one('.name'))
if not self.is_found_game(title):
continue
# <div class="infos">TPS,Survival Horror,Action</div>
genres = self.get_norm_text(game_block.select_one('.infos')).split(',')
# Сойдет первый, совпадающий по имени, вариант
return genres
# http://squarefaction.ru/game/dead-space
else:
self.log_info(f'Parsing of game page')
game_block = root.select_one('#page-info')
if game_block:
title = self.get_norm_text(game_block.select_one('#title'))
if not self.is_found_game(title):
self.log_warn(f'Not match game title {title!r}')
# <td class="nowraps-links">
# <a href="/games?genre=tps">TPS</a>,
# <a href="/games?genre=survival-horror">Survival Horror</a>,
# <a href="/games?genre=action">Action</a>
# </td>
genres = [
self.get_norm_text(a) for a in game_block.select('a') if '?genre=' in a['href']
]
# Сойдет первый, совпадающий по имени, вариант
return genres
self.log_info(f'Not found game {self.game_name!r}')
return []
def get_game_genres(game_name: str, *args, **kwargs) -> List[str]:
return SquarefactionRu_Parser(*args, **kwargs).get_game_genres(game_name)
if __name__ == '__main__':
from common import _common_test
_common_test(get_game_genres)
# Search 'Hellgate: London'...
# Genres: ['Action RPG']
#
# Search 'The Incredible Adventures of Van Helsing'...
# Genres: ['Action RPG']
#
# Search 'Dark Souls: Prepare to Die Edition'...
# Genres: []
#
# Search 'Twin Sector'...
# Genres: []
#
# Search 'Call of Cthulhu: Dark Corners of the Earth'...
# Genres: ['Survival Horror']
| 2.9375 | 3 |
setup.py | movermeyer/envtool | 3 | 12791422 | <gh_stars>1-10
# -*- encoding: utf-8 -*-
import glob
import io
import re
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
).read()
setup(
name="envtool",
version="0.1.0",
license="BSD",
description="A tool for managing envdirs and env files.",
long_description="%s\n%s" % (read("README.rst"), re.sub(":obj:`~?(.*?)`", r"``\1``", read("CHANGELOG.rst"))),
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/judy2k/envtool",
py_modules=[splitext(basename(i))[0] for i in glob.glob("*.py")],
include_package_data=True,
zip_safe=False,
classifiers=[
# complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: Unix",
"Operating System :: POSIX",
# "Operating System :: Microsoft :: Windows",
"Environment :: Console",
# "Intended Audience :: System Administrator",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
# "Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Utilities",
],
keywords=[
"environment", "envdir", "honcho", "foreman", "env",
],
install_requires=[
"future>=0.15.0",
"click>=4.0.0",
],
extras_require={
# eg: 'rst': ["docutils>=0.11"],
},
entry_points={
"console_scripts": [
"envtool=envtool:main",
]
},
cmdclass={'test': PyTest},
tests_require=[
"pytest>=2.7.2",
]
)
| 2 | 2 |
tests/test_rigraph.py | tipech/OverlapGraph | 0 | 12791423 | #!/usr/bin/env python
"""
Unit tests for Regional Intersection Graph -- NetworkX
- test_nxgraph_create
- test_nxgraph_contains
- test_nxgraph_put_region
- test_nxgraph_put_intersect
- test_nxgraph_to_dict
- test_nxgraph_from_dict
"""
from io import StringIO
from typing import List, Tuple
from unittest import TestCase
from pprint import pprint
from networkx import networkx as nx
from slig.datastructs.rigraph import RIGraph
from slig.datastructs.region import Region
class TestRIGraph(TestCase):
test_regions: List[Region]
def setUp(self):
self.test_regions = []
self.test_regions.append(Region([0, 0], [5, 5]))
self.test_regions.append(Region([2, 2], [5, 10]))
self.test_regions.append(Region([1, 5], [3, 7]))
self.test_regions.append(Region([-5, 5], [1, 7]))
self.test_regions.append(Region([-5, 5], [2, 7]))
def test_nxgraph_create(self):
graph = RIGraph(dimension=1)
self.assertTrue(graph.G is not None)
self.assertTrue(isinstance(graph.G, nx.Graph))
def test_nxgraph_contains(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
for region in self.test_regions[0:3]:
graph.put_region(region)
self.assertTrue(self.test_regions[0].id in graph)
def test_nxgraph_put_region(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
for region in self.test_regions:
graph.put_region(region)
self.assertEqual(self.test_regions, list(graph.regions))
def test_nxgraph_put_intersect(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
graph.put_region(self.test_regions[0])
graph.put_region(self.test_regions[1])
graph.put_intersection(self.test_regions[0], self.test_regions[1])
intersection = self.test_regions[0].get_intersection(self.test_regions[1])
self.assertEqual(intersection, list(graph.intersections)[0])
def test_nxgraph_to_dict(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
graph.put_region(self.test_regions[0])
graph.put_region(self.test_regions[1])
graph.put_intersection(self.test_regions[0], self.test_regions[1])
intersection = self.test_regions[0].get_intersection(self.test_regions[1])
graphdict = {'id':graph.id,'dimension':dimension,'json_graph':'node_link',
'graph':{
'directed': False, 'multigraph': False, 'graph':{},
'nodes':[{'id':r.id, 'region':r} for r in graph.regions],
'links':[{'source': self.test_regions[0].id,
'target': self.test_regions[1].id,
'region': intersection}]
}}
self.assertEqual(graphdict, graph.to_dict())
def test_nxgraph_from_dict(self):
dimension = self.test_regions[0].dimension
graph = RIGraph(dimension=dimension)
graph.put_region(self.test_regions[0])
graph.put_region(self.test_regions[1])
graph.put_intersection(self.test_regions[0], self.test_regions[1])
self.assertEqual(graph.to_dict(),
RIGraph.from_dict(graph.to_dict()).to_dict()) | 2.421875 | 2 |
wbia_orientation/test.py | WildMeOrg/wbia-plugin-orientation | 1 | 12791424 | # -*- coding: utf-8 -*-
# Written by <NAME> (<EMAIL>)
import os
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
from wbia_orientation.config.default import _C as cfg # NOQA
from wbia_orientation.config.default import update_config
from wbia_orientation.core.function import validate
from wbia_orientation.dataset import custom_transforms
from wbia_orientation.dataset.animal import AnimalDataset
from wbia_orientation.train import parse_args, _make_model, _model_to_gpu, _make_loss
from wbia_orientation.utils.utils import create_logger
def _make_test_data(cfg, logger):
"""Initialise train and validation loaders as per config parameters
Input:
cfg: config object
logger: logging object
Returns:
test_loader: Data Loader over test dataset
test_dataset: test dataset object
"""
test_transform = transforms.Compose(
[
custom_transforms.CropObjectAlignedArea(noise=0.0),
custom_transforms.Resize(cfg.MODEL.IMSIZE),
custom_transforms.ToTensor(),
custom_transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
input_size=cfg.MODEL.IMSIZE[0],
),
]
)
test_dataset = AnimalDataset(cfg, cfg.DATASET.TEST_SET, test_transform)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=cfg.TEST.BS * len(cfg.GPUS),
shuffle=False,
num_workers=cfg.WORKERS,
pin_memory=cfg.PIN_MEMORY,
)
return test_loader, test_dataset
def main():
args = parse_args()
update_config(cfg, args)
logger, final_output_dir = create_logger(cfg, args.cfg, 'test', False)
logger.info(pprint.pformat(args))
logger.info(cfg)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# Initialise models
model = _make_model(cfg, is_train=False)
# Load model weights
if cfg.TEST.MODEL_FILE:
model_state_file = cfg.TEST.MODEL_FILE
else:
model_state_file = os.path.join(final_output_dir, 'best.pth')
logger.info('=> loading model from {}'.format(model_state_file))
if cfg.USE_GPU:
model.load_state_dict(torch.load(model_state_file))
else:
model.load_state_dict(
torch.load(model_state_file, map_location=torch.device('cpu'))
)
model = _model_to_gpu(model, cfg)
# Initialise losses
loss_func = _make_loss(cfg)
# Initialise data loaders
test_loader, test_dataset = _make_test_data(cfg, logger)
# Evaluate on validation set
perf_indicator = validate(
cfg,
test_loader,
test_dataset,
model,
loss_func,
cfg.DATASET.TEST_SET,
final_output_dir,
)
logger.info(
'Final results. Accuracy@{} on {} {} is {:.2%}'.format(
cfg.TEST.THETA_THR, cfg.DATASET.NAME, cfg.DATASET.TEST_SET, perf_indicator
)
)
if __name__ == '__main__':
main()
| 1.921875 | 2 |
chess/piece/pawn.py | foxfluff/chess-py | 0 | 12791425 | <gh_stars>0
from ._piece import chess_piece
class pawn(chess_piece):
    def available_moves(self):
raise NotImplementedError
@staticmethod
def legal_moves():
return [[x, 1] for x in range(-1, 2)] + [[0, 2]]
| 3.015625 | 3 |
TWLight/users/migrations/0060_auto_20200804_1634.py | saloniig/TWLight | 0 | 12791426 | # Generated by Django 3.0.9 on 2020-08-04 16:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("resources", "0083_auto_20200804_1634"),
("users", "0059_auto_20200706_1659"),
]
operations = [
migrations.AlterField(
model_name="authorization",
name="partners",
field=models.ManyToManyField(
blank=True,
help_text="The partner(s) for which the editor is authorized.",
limit_choices_to=models.Q(status__in=[0, 2]),
to="resources.Partner",
),
),
migrations.AlterField(
model_name="authorization",
name="stream",
field=models.ForeignKey(
blank=True,
help_text="The stream for which the editor is authorized.",
limit_choices_to=models.Q(partner__status__in=[0, 2]),
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="resources.Stream",
),
),
]
| 1.632813 | 2 |
test/test_algos/test_opt_algorithm/test_racos/test_racos.py | IcarusWizard/ZOOpt | 403 | 12791427 | <filename>test/test_algos/test_opt_algorithm/test_racos/test_racos.py
from zoopt.algos.opt_algorithms.racos.racos_common import RacosCommon
from zoopt.algos.opt_algorithms.racos.sracos import SRacos
from zoopt import Solution, Objective, Dimension, Parameter, Opt, ExpOpt, ValueType, Dimension2
import numpy as np
def ackley(solution):
"""
Ackley function for continuous optimization
"""
x = solution.get_x()
bias = 0.2
ave_seq = sum([(i - bias) * (i - bias) for i in x]) / len(x)
ave_cos = sum([np.cos(2.0 * np.pi * (i - bias)) for i in x]) / len(x)
value = -20 * np.exp(-0.2 * np.sqrt(ave_seq)) - np.exp(ave_cos) + 20.0 + np.e
return value
def sphere_discrete_order(solution):
"""
Sphere function for integer continuous optimization
"""
x = solution.get_x()
value = sum([(i-2)*(i-2) for i in x])
return value
class SetCover:
"""
set cover problem for discrete optimization
this problem has some extra initialization tasks, thus we define this problem as a class
"""
def __init__(self):
self.__weight = [0.8356, 0.5495, 0.4444, 0.7269, 0.9960, 0.6633, 0.5062, 0.8429, 0.1293, 0.7355,
0.7979, 0.2814, 0.7962, 0.1754, 0.0267, 0.9862, 0.1786, 0.5884, 0.6289, 0.3008]
self.__subset = []
self.__subset.append([0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0])
self.__subset.append([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0])
self.__subset.append([1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0])
self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0])
self.__subset.append([1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1])
self.__subset.append([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0])
self.__subset.append([0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0])
self.__subset.append([0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])
self.__subset.append([0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0])
self.__subset.append([0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1])
self.__subset.append([0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0])
self.__subset.append([0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1])
self.__subset.append([1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1])
self.__subset.append([1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1])
self.__subset.append([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0])
self.__subset.append([1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1])
self.__subset.append([0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1])
self.__subset.append([0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0])
self.__subset.append([0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1])
def fx(self, solution):
"""
Objective function.
:param solution: a Solution object
:return: the value of f(x)
"""
x = solution.get_x()
allweight = 0
countw = 0
for i in range(len(self.__weight)):
allweight += self.__weight[i]
dims = []
for i in range(len(self.__subset[0])):
dims.append(False)
for i in range(len(self.__subset)):
if x[i] == 1:
countw += self.__weight[i]
for j in range(len(self.__subset[i])):
if self.__subset[i][j] == 1:
dims[j] = True
full = True
for i in range(len(dims)):
if dims[i] is False:
full = False
if full is False:
countw += allweight
return countw
@property
def dim(self):
"""
Dimension of set cover problem.
:return: Dimension instance
"""
dim_size = 20
dim_regs = [[0, 1]] * dim_size
dim_tys = [False] * dim_size
return Dimension(dim_size, dim_regs, dim_tys)
class TestRacos(object):
def test_racos_common_extend(self):
a = [1, 2, 3]
b = [2, 3, 4]
assert RacosCommon.extend(a, b) == [1, 2, 3, 2, 3, 4]
def test_racos_common_is_distinct(self):
a = Solution(x=[1, 2, 3])
b = Solution(x=[2, 3, 4])
c = Solution(x=[3, 4, 5])
seti = [a, b]
assert RacosCommon.is_distinct(seti, a) is False and RacosCommon.is_distinct(seti, c) is True
def test_sracos_distance(self):
a = [2, 4]
b = [5, 8]
assert SRacos.distance(a, b) == 5
def test_sracos_binary_search(self):
s0 = Solution(value=0)
s1 = Solution(value=1)
s2 = Solution(value=2)
s3 = Solution(value=3)
s4 = Solution(value=4)
# 1 3 0 2 4
test_s1 = Solution(value=2.1)
test_s2 = Solution(value=4.5)
test_s3 = Solution(value=-1)
test_s4 = Solution(value=2)
set = [s0, s1, s2, s3, s4]
sracos = SRacos()
assert sracos.binary_search(set, test_s1, 0, 4) == 3
assert sracos.binary_search(set, test_s1, 0, 2) == 3
assert sracos.binary_search(set, test_s2, 0, 4) == 5
assert sracos.binary_search(set, test_s3, 0, 4) == 0
assert sracos.binary_search(set, test_s4, 0, 4) == 3
def test_sracos_strategy_wr(self):
s0 = Solution(value=0)
s1 = Solution(value=1)
s2 = Solution(value=2)
s3 = Solution(value=3)
s4 = Solution(value=4)
iset = [s0, s1, s2, s3, s4]
sracos = SRacos()
test_s1 = Solution(value=2.1)
sracos.strategy_wr(iset, test_s1, 'pos')
assert len(iset) == 5 and iset[0].get_value() == 0 and iset[1].get_value() == 1 and iset[2].get_value() == 2 \
and iset[3].get_value() == 2.1 and iset[4].get_value() == 3
iset2 = [s1, s3, s0, s2, s4]
sracos.strategy_wr(iset2, test_s1, 'neg')
assert len(iset2) == 5 and iset2[4].get_value() == 2.1
def test_sracos_strategy_rr(self):
s0 = Solution(value=0)
s1 = Solution(value=1)
s2 = Solution(value=2)
iset = [s0, s1, s2]
sracos = SRacos()
test_s1 = Solution(value=2.1)
sracos.strategy_rr(iset, test_s1)
assert len(iset) == 3 and (iset[0].get_value() == 2.1 or iset[1].get_value() == 2.1 or iset[2].get_value() == 2.1)
def test_sracos_strategy_lm(self):
s0 = Solution(x=[1, 1, 1], value=0)
s1 = Solution(x=[2.2, 2.2, 2.2], value=1)
s2 = Solution(x=[3, 3, 3], value=2)
iset = [s0, s1, s2]
sracos = SRacos()
test_s1 = Solution(x=[2.1, 2.1, 2.1], value=2.1)
sracos.strategy_lm(iset, s0, test_s1)
assert iset[2].get_value() == 2.1
def test_sracos_replace(self):
s0 = Solution(x=[0, 0, 0], value=0.5)
s1 = Solution(x=[1, 1, 1], value=1)
s2 = Solution(x=[2, 2, 2], value=2)
s3 = Solution(x=[3, 3, 3], value=3)
s4 = Solution(x=[4, 4, 4], value=4)
pos_set = [s0, s1, s2, s3, s4]
neg_set = [s2, s3, s1, s4, s0]
x = Solution(x=[2.1, 2.1, 2.1], value=0.1)
sracos = SRacos()
sracos.replace(pos_set, x, 'pos', 'WR')
assert pos_set[4].get_value() == 3 and pos_set[0].get_value() == 0.1
sracos.replace(neg_set, x, 'neg', 'LM')
assert neg_set[3].get_value() == 0.1
def test_racos_performance(self):
# continuous
dim = 100 # dimension
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=100 * dim, sequential=False, seed=1)
solution = ExpOpt.min(objective, parameter)[0]
assert solution.get_value() < 0.2
dim = 500
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=10000, sequential=False, seed=1)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert solution.get_value() < 2
# discrete
# setcover
problem = SetCover()
dim = problem.dim # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, sequential=False, seed=777)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
dim_regs = [[-10, 10]] * dim_size # dimension range
dim_tys = [False] * dim_size # dimension type : integer
dim_order = [True] * dim_size
dim = Dimension(dim_size, dim_regs, dim_tys, order=dim_order) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000, sequential=False, seed=77)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 200
def test_racos_performance2(self):
# continuous
dim = 100 # dimension
one_dim = (ValueType.CONTINUOUS, [-1, 1], 1e-6)
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list)) # setup objective
parameter = Parameter(budget=100 * dim, sequential=False, seed=1)
solution = ExpOpt.min(objective, parameter)[0]
assert solution.get_value() < 0.2
dim = 500
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list)) # setup objective
parameter = Parameter(budget=10000, sequential=False, seed=1)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert solution.get_value() < 2
# discrete
# setcover
problem = SetCover()
dim_size = 20
one_dim = (ValueType.DISCRETE, [0, 1], False)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, sequential=False, seed=777)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
one_dim = (ValueType.DISCRETE, [-10, 10], True)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000, sequential=False, seed=77)
sol = Opt.min(objective, parameter)
sol.print_solution()
assert sol.get_value() < 200
def test_sracos_performance(self):
# continuous
dim = 100 # dimension
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=100 * dim, seed=77)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 0.2
dim = 500
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=10000, seed=777)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 1.5
# discrete
# setcover
problem = SetCover()
dim = problem.dim # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, seed=777)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
dim_regs = [[-10, 10]] * dim_size # dimension range
dim_tys = [False] * dim_size # dimension type : integer
dim_order = [True] * dim_size
dim = Dimension(dim_size, dim_regs, dim_tys, order=dim_order) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 200
def test_sracos_performance2(self):
# continuous
dim = 100 # dimension
one_dim = (ValueType.CONTINUOUS, [-1, 1], 1e-6)
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list))
parameter = Parameter(budget=100 * dim, seed=77)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 0.2
dim = 500
one_dim = (ValueType.CONTINUOUS, [-1, 1], 1e-6)
dim_list = [(one_dim)] * dim
objective = Objective(ackley, Dimension2(dim_list)) # setup objective
parameter = Parameter(budget=10000, seed=777)
solution = Opt.min(objective, parameter)
assert solution.get_value() < 1.5
# discrete
# setcover
problem = SetCover()
dim_size = 20
one_dim = (ValueType.DISCRETE, [0, 1], False)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, seed=777)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
one_dim = (ValueType.DISCRETE, [-10, 10], True)
dim_list = [(one_dim)] * dim_size
dim = Dimension2(dim_list) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000)
sol = Opt.min(objective, parameter)
assert sol.get_value() < 200
def test_asracos_performance(self):
# continuous
dim = 100 # dimension
objective = Objective(ackley, Dimension(dim, [[-1, 1]] * dim, [True] * dim)) # setup objective
parameter = Parameter(budget=100 * dim, parallel=True, server_num=2, seed=2)
# parameter = Parameter(budget=100 * dim, init_samples=[Solution([0] * 100)]) # init with init_samples
solution_list = ExpOpt.min(objective, parameter, repeat=1)
for solution in solution_list:
value = solution.get_value()
assert value < 0.2
# discrete
# setcover
problem = SetCover()
dim = problem.dim # the dim is prepared by the class
objective = Objective(problem.fx, dim) # form up the objective function
budget = 100 * dim.get_size() # number of calls to the objective function
parameter = Parameter(budget=budget, parallel=True, server_num=2, seed=777)
sol = ExpOpt.min(objective, parameter, repeat=1)[0]
assert sol.get_value() < 2
# sphere
dim_size = 100 # dimensions
dim_regs = [[-10, 10]] * dim_size # dimension range
dim_tys = [False] * dim_size # dimension type : integer
dim_order = [True] * dim_size
dim = Dimension(dim_size, dim_regs, dim_tys, order=dim_order) # form up the dimension object
objective = Objective(sphere_discrete_order, dim) # form up the objective function
parameter = Parameter(budget=10000, parallel=True, server_num=2, uncertain_bits=1, seed=1)
sol = ExpOpt.min(objective, parameter)[0]
assert sol.get_value() < 10
| 2.46875 | 2 |
cogs/rolemanager.py | yaansz/RoleManager | 1 | 12791428 | import discord
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, CheckFailure
from utils.converters import CtxRoleConverter
from utils.utils import str2bool
from functools import reduce
import random
import json
import utils.embed as embed
from utils.colors import *
import os
#DB
from pymongo import MongoClient
import logging
# ENV
from dotenv import dotenv_values
ENV = dotenv_values(os.path.dirname(os.path.abspath(__file__)) + "/../.env")
class RoleManager(commands.Cog):
"""
Manager is useful to create and delete roles.
You can link a role to a chat or just create a role with a name that you like!
"""
def __init__(self, client):
self.client = client
        # Some useful parameters like timers and message-deletion delays
with open(os.path.dirname(os.path.abspath(__file__)) + '/../database/utils.json', 'r') as f:
info = json.load(f)
# Just to log everything :D
self.log = logging.getLogger(__name__)
        # TODO: Move this loading logic into a parent class (I'm not sure about it yet)
self.delete_user_message = info['utils']['delete_user_message']
self.delete_system_message = info['utils']['delete_system_message']
self.db_client = MongoClient(ENV['MONGODB'])
self.guild_preferences_db = self.db_client[info['mongo']['database']][info['mongo']['collection']]
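        # Permission flag names accepted by the .permission command; they
        # mirror the attribute names on discord.Permissions / PermissionOverwrite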
self.channel_permissions = [
"add_reactions",
"administrator",
"attach_files",
"ban_members",
"change_nickname",
"connect",
"create_instant_invite",
"deafen_members",
"embed_links",
"external_emojis",
"kick_members",
"manage_channels",
"manage_emojis",
"manage_guild",
"manage_messages",
"manage_nicknames",
"manage_permissions",
"manage_roles",
"manage_webhooks",
"mention_everyone",
"move_members",
"mute_members",
"priority_speaker",
"read_message_history",
"read_messages",
"request_to_speak",
"send_messages",
"send_tts_messages",
"speak",
"stream",
"use_external_emojis",
"use_slash_commands",
"use_voice_activation",
"value",
"view_audit_log",
"view_channel",
"view_guild_insights"
]
@commands.Cog.listener()
async def on_guild_channel_update(self, before, after):
'''
        Listener that watches guild channels and deletes the role linked to a channel once that channel is moved into the archives category
'''
        # Category changed
        if after.category is None:
            return
        elif (before.category is None and after.category is not None) or (before.category.id != after.category.id):
guild = after.guild
info = self.guild_preferences_db.find_one({"_id": guild.id})
            # Name created whenever a channel is linked to a category!
            if before.category is not None:
role_name = before.category.name + " - " + before.name
else:
role_name = before.name
            # Category in which the linked role must be deleted
if after.category.id == info['archives']:
for r in guild.roles:
if r.name == role_name:
await r.delete()
embedmsg = embed.createEmbed(title="Cargo associado excluído!",
description= f"O cargo '{role_name}' associado ao canal foi excluído devido a movimentação do mesmo para os arquivos.",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
],
img="https://cdn.discordapp.com/emojis/753575574546415656.png?v=1")
                        # Send the notification embed
await after.send(embed=embedmsg)
self.log.debug(f"Role {role_name} deleted (Channel moved to archives)!")
return
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel):
target_type_channels = ["text", "category"]
if channel.type.name.lower() not in target_type_channels:
return
        elif channel.type.name.lower() == "text" and channel.category is not None:
option = channel.category.name + " - " + channel.name
        # Text channels without a category just use the bare channel name
elif channel.type.name.lower() == "text":
option = channel.name
else:
option = channel.name
for r in channel.guild.roles:
if r.name == option:
role = r
await role.delete()
self.log.debug(f"Role '{option}' deleted because linked channel was deleted")
break
return
@commands.command(aliases=['criar'], pass_context=True)
@has_permissions(manage_roles = True)
async def create(self, ctx, *, args: str = "channel"):
"""Create a new role with the given name
"""
await ctx.message.delete(delay = self.delete_user_message)
linked_keys = ["channel", "category"]
role_name = self.linked_role(ctx, args) if args in linked_keys else args
# Defining useful variables
guild = ctx.guild
author = ctx.author
msg = ctx.message
role_exists, role = await self.role_exists(ctx, role_name)
if role_exists:
embedmsg = embed.createEmbed(title="CARGO JÁ EXISTE!",
description= f"O cargo <@&{role.id}> já está no servidor, não precisa criar de novo!🍻",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
("Como pegar?", f"Apenas digite '.get' e ele será adicionado na sua conta", False)
],
img="https://cdn.discordapp.com/emojis/814010519022600192.png?v=1")
await msg.channel.send(embed=embedmsg, delete_after= self.delete_system_message)
else:
# New Role Created!
new_role = await guild.create_role(name=role_name, mentionable=True)
self.log.info( (f"New role '{new_role.name}' created in guild {guild.name} : {guild.id}").encode('ascii', 'ignore').decode('ascii') )
            # TODO: Tailor the message to the role that was just created!
embedmsg = embed.createEmbed(title="Novo Cargo!",
description= f"O cargo <@&{new_role.id}> foi criado por <@{author.id}>",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
("Como pegar?", f"Apenas digite .get no chat do cargo ou .get {new_role.name} e ele será adicionado na sua conta", False)
],
img="https://cdn.discordapp.com/emojis/859150737509580800.gif?v=1")
await msg.channel.send(embed=embedmsg)
return
@create.error
async def create_error(self, ctx, error):
await ctx.message.delete(delay = self.delete_user_message)
if isinstance(error, CheckFailure):
await ctx.send("**Erro:** Você não pode criar um cargo!", delete_after = self.delete_system_message)
else:
self.log.error(f"{error} - creation of a new role failed")
await ctx.send(error, delete_after = self.delete_system_message)
# TODO: Parent class too
async def role_exists(self, ctx, role_name):
"""
        Check whether a role exists in the current context; returns a status flag and the role when it exists.
"""
conv = commands.RoleConverter()
        # If the conversion succeeds, the role already exists
try:
r = await conv.convert(ctx, role_name)
return True, r
except commands.RoleNotFound:
return False, None
# TODO: Put it in a parent class
def linked_role(self, ctx, type: str):
"""
This function is used to return a name to a role linked to a channel or category
"""
guild = ctx.guild
author = ctx.author
msg = ctx.message
if type.lower() == "channel" and msg.channel.category != None:
option = msg.channel.category.name + " - " + msg.channel.name
elif type.lower() == "channel":
option = msg.channel.name
elif type.lower() == "category":
option = msg.channel.category.name
else:
raise ValueError("")
return option;
@commands.command(aliases=['deletar'], pass_context=True)
@has_permissions(manage_roles = True)
async def delete(self, ctx, *, role: commands.RoleConverter):
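        """Delete the given role from the server."""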
await ctx.message.delete(delay= self.delete_user_message)
await role.delete()
await ctx.send(f"**AVISO:** Cargo '{role.name}' apagado do servidor por <@{ctx.author.id}>!")
@delete.error
async def delete_error(self, ctx, error):
await ctx.message.delete(delay = self.delete_user_message)
if isinstance(error, CheckFailure):
await ctx.send("**Erro:** Você não pode deletar um cargo!", delete_after = self.delete_system_message)
else:
self.log.error(f"{error} - delete role failed")
await ctx.send(error, delete_after = self.delete_system_message)
async def _permission(self, ctx, role: CtxRoleConverter, mode: str, perm: str, can: bool):
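        """Apply a single permission overwrite for a role on the current channel, category, or the role itself."""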
guild = ctx.guild
author = ctx.author
msg = ctx.message
overwrite = discord.PermissionOverwrite()
        # setattr(x, 'attr_name', s) is equivalent to x.attr_name = s, which
        # lets us set a permission whose attribute name is only known at runtime
if perm not in channel_permissions:
self.log.debug( f"[.permission] Permission {perm} not found!")
return
setattr(overwrite, perm, can)
if mode == 'category':
category = ctx.channel.category
await category.set_permissions(role, overwrite = overwrite)
elif mode == 'channel':
channel = ctx.channel
await channel.set_permissions(role, overwrite = overwrite)
else:
            # TODO: Not working yet - Role.edit expects a discord.Permissions
            # object via the 'permissions' keyword, not a PermissionOverwrite
            await role.edit(permissions=overwrite)
self.log.debug( (f'Permission {perm} was changed to {can} in role {role.name} in current category').encode('ascii', 'ignore').decode('ascii') )
fb = 'Permitido' if can else 'Proibido'
embedmsg = embed.createEmbed(title="Permissão alterada!",
description= f"O cargo <@&{role.id}> foi atualizado por <@{author.id}>",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
(f"Permissão '{perm}'", f"Atualizada para {fb}", False)
],
img="https://cdn.discordapp.com/emojis/765969524897218594.png?v=1")
await msg.channel.send(embed=embedmsg)
return
@commands.command(pass_context=True)
@has_permissions(manage_roles = True, manage_channels = True)
async def permission(self, ctx, *, args: str = ""):
"""
Arg List:
ctx -> Discord Context
role -> CtxRoleConverter
mode -> channel, category or role
perm -> permission to change
bool -> bool
"""
await ctx.message.delete(delay = self.delete_user_message)
splitted_args = args.split(' ')
if len(splitted_args) < 4 or args == "":
# Just for now
self.log.debug("[.permission] Missing args")
await self.permission_tutorial(ctx)
            return
can = str2bool(splitted_args[-1])
perm = splitted_args[-2]
mode = splitted_args[-3]
role_name = ' '.join(splitted_args[:-3])
        status, role = await self.role_exists(ctx, role_name)
        if not status:
            self.log.debug(f"[.permission] Role '{role_name}' not found")
            return
        await self._permission(ctx, role, mode, perm, can)
async def permission_tutorial(self, ctx):
embedmsg = embed.createEmbed(title="Configurações de Permissões!",
description= f"Verifique a lista de argumentos e permissões",
color=rgb_to_int((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))),
fields=[
(f"Argumentos", f"""role -> Role
mode -> channel, category or role
perm -> permission to change
bool -> bool""", False),
(f"Permissões", "\n".join([item for item in self.channel_permissions]), False)
],
img="https://cdn.discordapp.com/emojis/767241157003837460.png?v=1")
await ctx.send(embed=embedmsg)
# Setup
def setup(client):
client.add_cog(RoleManager(client))
| 2.546875 | 3 |
app/models/__init__.py | woodybriggs/attribute-store | 0 | 12791429 | from sqlalchemy import Column, String, Boolean, ForeignKey, Integer, Float
from ..db import Base
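# Attribute values are stored via joined-table inheritance: the shared columns
# live in 'attribute' and each Python type (bool/int/float/str) keeps its value
# in its own table, discriminated by the type name.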
class Attribute(Base):
__tablename__ = "attribute"
id = Column(Integer, autoincrement=True, primary_key=True, unique=True, nullable=False)
type = Column(String(length=256), nullable=False)
remote_reference = Column(String(256), nullable=False)
key = Column(String(length=256), unique=True)
__mapper_args__ = {
'polymorphic_identity': 'attribute',
'polymorphic_on': type
}
class BooleanAttribute(Attribute):
__tablename__ = "boolean_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(Boolean)
__mapper_args__ = {
'polymorphic_identity': bool.__name__
}
class IntegerAttribute(Attribute):
__tablename__ = "integer_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(Integer)
__mapper_args__ = {
'polymorphic_identity': int.__name__
}
class FloatAttribute(Attribute):
__tablename__ = "float_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(Float)
__mapper_args__ = {
'polymorphic_identity': float.__name__
}
class StringAttribute(Attribute):
__tablename__ = "string_attribute"
id = Column(Integer, ForeignKey('attribute.id'), primary_key=True)
value = Column(String(length=4096))
__mapper_args__ = {
'polymorphic_identity': str.__name__
} | 2.625 | 3 |
scihub/scihub.py | lkirk/scihub-client | 0 | 12791430 | # -*- coding: utf-8 -*-
"""
SciHub client
"""
import logging
import os
import random
import urllib.parse
import requests
from bs4 import BeautifulSoup
LOG = logging.getLogger(__name__)
LOG.addHandler(logging.NullHandler())
class SciHubClient:
"""
Client for accessing SciHub
"""
DEFAULT_HEADERS = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:77.0) Gecko/20100101 Firefox/77.0",
}
SCIHUB_NOW_URL = "https://sci-hub.now.sh"
FALLBACK_BASE_URL = "https://sci-hub.tw"
def __init__(self, proxy=None, fallback_base_url=FALLBACK_BASE_URL):
self._sess = requests.Session()
self._sess.headers.update(self.DEFAULT_HEADERS)
self._fallback_base_url = fallback_base_url
self._available_base_url_list = self._get_available_scihub_urls()
self._set_base_url()
if proxy is not None:
self._set_proxy(proxy)
def _get(self, url, raise_for_status=True, **kwargs):
response = self._sess.get(url, **kwargs)
if raise_for_status is True:
response.raise_for_status()
return response
def _post(self, url, raise_for_status=True, **kwargs):
response = self._sess.post(url, **kwargs)
if raise_for_status is True:
response.raise_for_status()
return response
def _get_available_scihub_urls(self):
response = self._get(self.SCIHUB_NOW_URL, raise_for_status=False)
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
LOG.debug("falling back to %s", self._fallback_base_url)
return [self._fallback_base_url]
parsed_content = BeautifulSoup(response.content, "html.parser")
urls = []
for a_tag in parsed_content.find_all("a", href=True):
link = a_tag["href"]
if (
"sci-hub" in link # pylint: disable=C0330
and link.startswith("https") # pylint: disable=C0330
and link != self.SCIHUB_NOW_URL # pylint: disable=C0330
):
urls.append(a_tag["href"])
return urls
def _set_proxy(self, proxy):
self._sess.proxies = {
"http": proxy,
"https": proxy,
}
def _set_base_url(self):
"""
Pick a random url from the available scihub urls
set the current base url to the new url
"""
if not self._available_base_url_list:
raise ValueError("Ran out of valid sci-hub urls")
        (base_url,) = random.sample(self._available_base_url_list, 1)
self._base_url = base_url
LOG.debug("url changing to %s", self._base_url)
@staticmethod
def _get_doi(parsed_response):
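        """Extract the DOI from the inline 'var doi = ...' script on the result page."""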
((doi,),) = [
[
line.strip().split("'")[1]
for line in script.string.split("\n")
if "var doi" in line
]
for script in parsed_response.find_all("script")
if script.string and "var doi" in script.string
]
return doi
def query(self, query):
"""
Query for a paper hosted by sci-hub
"""
response = self._post(
self._base_url,
data={"request": query},
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
parsed_response = BeautifulSoup(response.content, "html.parser")
if parsed_response.find("div").text.endswith("article not found"):
raise ValueError(f"Article not found: {query}")
cleaned_url = urllib.parse.urlparse(
urllib.parse.urldefrag(parsed_response.find("iframe").get("src")).url,
scheme="https",
).geturl()
return {
"doi": self._get_doi(parsed_response),
"pdf_url": cleaned_url,
}
def _download_pdf(self, url):
result = self._get(url)
if result.headers["Content-Type"] != "application/pdf":
raise ValueError("File is not a pdf")
return result.content
def _get_paper_meta(self, doi):
return self._get(
urllib.parse.urljoin("https://doi.org", doi),
headers={"Accept": "application/vnd.citationstyles.csl+json"},
).json()
def _generate_file_name(self, doi):
paper_meta = self._get_paper_meta(doi)
# date = "-".join(map(str, paper_meta["indexed"]["date-parts"][0]))
((year, _, _),) = paper_meta["published-print"]["date-parts"]
title = paper_meta["title"]
# return f"({date}) {title}.pdf"
return f"({year}) {title}.pdf"
def download(self, query, destination="", filename=None):
"""
Download paper from sci-hub
"""
query_result = self.query(query)
pdf_string = self._download_pdf(query_result["pdf_url"])
filename = (
self._generate_file_name(query_result["doi"])
if filename is None
else filename
)
out_path = os.path.join(destination, filename)
with open(out_path, "wb") as out_fp:
out_fp.write(pdf_string)
return {"out_path": out_path, **query_result}
| 2.515625 | 3 |
jobbing/models_remote/media.py | davidall-amdocs/jobbing | 0 | 12791431 | <gh_stars>0
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from jobbing.models.base_model_ import Model
from jobbing import util
class Media(Model):
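    """Model for a media record, following the swagger-codegen layout (swagger_types + attribute_map)."""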
def __init__(self,
media_id:int = None,
media_status_id:int = None,
media_data:str = None,
media_link:str = None,
media_title:str = None,
media_description:str = None,
media_size:float = None,
media_content_upload_date:str = None,
media_content_updated_date:str = None): # noqa: E501
self.swagger_types = {
'media_id': int,
'media_status_id': int,
'media_data': str,
'media_link': str,
'media_title': str,
'media_description': str,
'media_size': float,
'media_content_upload_date': str,
'media_content_updated_date': str
}
self.attribute_map = {
'media_id': 'media_id',
'media_status_id': 'media_status_id',
'media_data': 'media_data',
'media_link': 'media_link',
'media_title': 'media_title',
'media_description': 'media_description',
'media_size': 'media_size',
'media_content_upload_date': 'media_content_upload_date',
'media_content_updated_date': 'media_content_updated_date'
}
self._media_id = media_id
self._media_status_id = media_status_id
self._media_data = media_data
self._media_link = media_link
self._media_title = media_title
self._media_description = media_description
self._media_size = media_size
self._media_content_upload_date = media_content_upload_date
self._media_content_updated_date = media_content_updated_date
@classmethod
def from_dict(cls, dikt) -> 'Media':
return util.deserialize_model(dikt, cls)
@property
def media_id(self) -> int:
return self._media_id
@media_id.setter
def media_id(self, param):
if param is None:
raise ValueError("Invalid value for `media_id`, must not be `None`") # noqa: E501
self._media_id = param
@property
def media_status_id(self) -> int:
return self._media_status_id
@media_status_id.setter
def media_status_id(self, param):
if param is None:
raise ValueError("Invalid value for `media_status_id`, must not be `None`") # noqa: E501
self._media_status_id = param
@property
def media_data(self) -> str:
return self._media_data
@media_data.setter
def media_data(self, param):
if param is None:
raise ValueError("Invalid value for `media_data`, must not be `None`") # noqa: E501
self._media_data = param
@property
def media_link(self) -> str:
return self._media_link
@media_link.setter
def media_link(self, param):
if param is None:
raise ValueError("Invalid value for `media_link`, must not be `None`") # noqa: E501
self._media_link = param
@property
def media_title(self) -> str:
return self._media_title
@media_title.setter
def media_title(self, param):
if param is None:
raise ValueError("Invalid value for `media_title`, must not be `None`") # noqa: E501
self._media_title = param
@property
def media_description(self) -> str:
return self._media_description
@media_description.setter
def media_description(self, param):
if param is None:
raise ValueError("Invalid value for `media_description`, must not be `None`") # noqa: E501
self._media_description = param
@property
def media_size(self) -> float:
return self._media_size
@media_size.setter
def media_size(self, param):
if param is None:
raise ValueError("Invalid value for `media_size`, must not be `None`") # noqa: E501
self._media_size = param
@property
def media_content_upload_date(self) -> str:
return self._media_content_upload_date
@media_content_upload_date.setter
def media_content_upload_date(self, param):
if param is None:
raise ValueError("Invalid value for `media_content_upload_date`, must not be `None`") # noqa: E501
self._media_content_upload_date = param
@property
def media_content_updated_date(self) -> str:
return self._media_content_updated_date
@media_content_updated_date.setter
def media_content_updated_date(self, param):
if param is None:
raise ValueError("Invalid value for `media_content_updated_date`, must not be `None`") # noqa: E501
self._media_content_updated_date = param
| 1.867188 | 2 |
qctool/src/mode.py | meghasin/icees-api-config | 0 | 12791432 | <gh_stars>0
from typing import Dict, List, Optional
from dataclasses import dataclass, field
from file import YAMLFile
@dataclass
class CacheTables:
tables: Dict[str, list] = field(default_factory=dict)
table_names: List[str] = field(default_factory=list)
current_table: Optional[int] = 0
def update_tables(self, config, tables):
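        # Clamp current_table so it always points at a valid index after the table list changes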
self.tables = tables
if self.current_table >= len(config.table_names):
self.current_table = len(config.table_names) - 1
if self.current_table < 0:
self.current_table = 0
@dataclass
class CacheFile:
filename: str
typ: str
update : Optional[str] = None
fil: Optional[YAMLFile] = None
old_key : Optional[str] = None
def update_file(self, fil):
self.fil = fil
self.old_key = None
@dataclass
class DiffMode:
a_cache_file : CacheFile
b_cache_file : CacheFile
cache_tables : CacheTables = field(default_factory=CacheTables)
def update_files(self, config, a_file, b_file, tables):
self.a_cache_file.update_file(a_file)
self.b_cache_file.update_file(b_file)
self.cache_tables.update_tables(config, tables)
@dataclass
class FocusedMode:
a_focused: bool
cache_file: CacheFile
cache_tables: CacheTables = field(default_factory=CacheTables)
def update_file(self, config, fil, tables):
self.cache_file.update_file(fil)
self.cache_tables.update_tables(config, tables)
@dataclass
class Config:
a_filename: str
b_filename: str
a_type: str
b_type: str
a_only: bool
b_only: bool
table_names: List[str]
similarity_threshold: float
max_entries: int
ignore_suffix: List[str]
a_updated: bool
b_updated: bool
a_update: Optional[str]
b_update: Optional[str]
| 2.390625 | 2 |
Testing/test_2D_frames.py | geosharma/PyNite | 199 | 12791433 | <filename>Testing/test_2D_frames.py
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>, SE; tamalone1
"""
import unittest
from PyNite import FEModel3D
import math
import sys
from io import StringIO
class Test_2D_Frame(unittest.TestCase):
''' Tests of analyzing 2D frames. '''
def setUp(self):
# Suppress printed output temporarily
sys.stdout = StringIO()
def tearDown(self):
# Reset the print function to normal
sys.stdout = sys.__stdout__
def test_XY_gravity_load(self):
# A First Course in the Finite Element Method, 4th Edition
# <NAME>
# Problem 5.30
# Units for this model are kips and inches
frame = FEModel3D()
# Define the nodes
frame.add_node('N1', 0, 0, 0)
frame.add_node('N2', 0, 30*12, 0)
frame.add_node('N3', 15*12, 40*12, 0)
frame.add_node('N4', 35*12, 40*12, 0)
frame.add_node('N5', 50*12, 30*12, 0)
frame.add_node('N6', 50*12, 0, 0)
# Define the supports
frame.def_support('N1', True, True, True, True, True, True)
frame.def_support('N6', True, True, True, True, True, True)
# Create members (all members will have the same properties in this example)
J = 250
Iy = 250
Iz = 200
E = 30000
G = 250
A = 12
frame.add_member('M1', 'N1', 'N2', E, G, Iy, Iz, J, A)
frame.add_member('M2', 'N2', 'N3', E, G, Iy, Iz, J, A)
frame.add_member('M3', 'N3', 'N4', E, G, Iy, Iz, J, A)
frame.add_member('M4', 'N4', 'N5', E, G, Iy, Iz, J, A)
frame.add_member('M5', 'N5', 'N6', E, G, Iy, Iz, J, A)
# Add nodal loads
frame.add_node_load('N3', 'FY', -30)
frame.add_node_load('N4', 'FY', -30)
# Analyze the model
frame.analyze()
# subTest context manager prints which portion fails, if any
correct_values = [('N1', {'RxnFX': 11.6877,
'RxnFY': 30,
'RxnMZ': -1810.0745}),
('N6', {'RxnFX': -11.6877,
'RxnFY': 30,
'RxnMZ': 1810.0745})]
for name, values in correct_values:
with self.subTest(node=name):
node = frame.Nodes[name]
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(node.RxnFX['Combo 1']/values['RxnFX'], 1.0, 2)
self.assertAlmostEqual(node.RxnFY['Combo 1']/values['RxnFY'], 1.0, 2)
self.assertAlmostEqual(node.RxnMZ['Combo 1']/values['RxnMZ'], 1.0, 2)
def test_XY_member_ptload(self):
frame = FEModel3D()
# Add nodes
frame.add_node('N1', 0, 0, 0) # ft
frame.add_node('N2', 0, 7.667, 0) # ft
frame.add_node('N3', 7.75, 7.667, 0) # ft
frame.add_node('N4', 7.75, 0, 0) # ft
# Add supports
frame.def_support('N1', True, True, True, True, True, False)
frame.def_support('N4', True, True, True, True, True, False)
# Define material and section properties for a W8x24
E = 29000*12**2 # ksf
G = 1111200*12**2 # ksf
Iy = 18.3/12**4 # ft^4
Iz = 82.7/12**4 # ft^4
J = 0.346/12**4 # ft^4
        A = 5.26/12**2 # ft^2
# Define members
frame.add_member('M1', 'N1', 'N2', E, G, Iy, Iz, J, A)
frame.add_member('M2', 'N2', 'N3', E, G, Iy, Iz, J, A)
frame.add_member('M3', 'N4', 'N3', E, G, Iy, Iz, J, A)
# Add loads to the frame
frame.add_member_pt_load('M2', 'Fy', -5, 7.75/2) # 5 kips @ midspan
frame.add_member_dist_load('M2', 'Fy', -0.024, -0.024) # W8x24 self-weight
# Analyze the frame
frame.analyze()
calculated_RZ = frame.Nodes['N1'].RZ['Combo 1']
# Update the expected value to an appropriate precision
expected_RZ = 0.00022794540510395617
self.assertAlmostEqual(calculated_RZ/expected_RZ, 1.0, 2)
def test_YZ_gravity_load(self):
# A First Course in the Finite Element Method, 4th Edition
# Daryl <NAME>
# Problem 5.30
# Units for this model are kips and inches
frame = FEModel3D()
# Define the nodes
frame.add_node('N1', 0, 0, 0)
frame.add_node('N2', 0, 30*12, 0)
frame.add_node('N3', 0, 40*12, 15*12)
frame.add_node('N4', 0, 40*12, 35*12)
frame.add_node('N5', 0, 30*12, 50*12)
frame.add_node('N6', 0, 0, 50*12)
# Define the supports
frame.def_support('N1', True, True, True, True, True, True)
frame.def_support('N6', True, True, True, True, True, True)
# Create members (all members will have the same properties in this example)
J = 250
Iy = 250
Iz = 200
E = 30000
G = 250
A = 12
frame.add_member('M1', 'N1', 'N2', E, G, Iz, Iy, J, A)
frame.add_member('M2', 'N2', 'N3', E, G, Iy, Iz, J, A)
frame.add_member('M3', 'N3', 'N4', E, G, Iy, Iz, J, A)
frame.add_member('M4', 'N4', 'N5', E, G, Iy, Iz, J, A)
frame.add_member('M5', 'N5', 'N6', E, G, Iz, Iy, J, A)
# Add nodal loads
frame.add_node_load('N3', 'FY', -30)
frame.add_node_load('N4', 'FY', -30)
# Analyze the model
frame.analyze()
# subTest context manager prints which portion fails, if any
# Check reactions at N1 and N6
correct_reactions = [('N1', {'RxnFZ': 11.6877,
'RxnFY': 30,
'RxnMX': 1810.0745}),
('N6', {'RxnFZ': -11.6877,
'RxnFY': 30,
'RxnMX': -1810.0745})]
for name, values in correct_reactions:
with self.subTest(node=name):
node = frame.Nodes[name]
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(node.RxnFZ['Combo 1']/values['RxnFZ'], 1.0, 2)
self.assertAlmostEqual(node.RxnFY['Combo 1']/values['RxnFY'], 1.0, 2)
self.assertAlmostEqual(node.RxnMX['Combo 1']/values['RxnMX'], 1.0, 2)
# Check displacements at N3 and N4
correct_displacements = [('N3', {'DY': -6.666757,
'RX': 0.032}),
('N4', {'DY': -6.666757,
'RX': -0.032})]
for name, values in correct_displacements:
with self.subTest(node=name):
node = frame.Nodes[name]
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(node.DY['Combo 1']/values['DY'], 1.0, 2)
self.assertAlmostEqual(node.RX['Combo 1']/values['RX'], 1.0, 2)
def test_XZ_ptload(self):
# A simply supported beam with a point load.
# Units used in this example are inches, and kips
SimpleBeam = FEModel3D()
# Add nodes (14 ft = 168 in apart)
SimpleBeam.add_node("N1", 0, 0, 0)
SimpleBeam.add_node("N2", 0, 0, 168)
# Add a beam with the following properties:
A = 20
E = 29000
G = 11400
Iy = 100
Iz = 150
J = 250
SimpleBeam.add_member("M1", "N1", "N2", E, G, Iy, Iz, J, A)
# Provide simple supports
SimpleBeam.def_support("N1", True, True, True, False, False, True)
SimpleBeam.def_support("N2", True, True, True, False, False, False)
# Add a point load of 5 kips at the midspan of the beam
SimpleBeam.add_member_pt_load("M1", "Fy", 5, 7 * 12)
# Analyze the beam
SimpleBeam.analyze(False)
# Print reactions at each end of the beam
correct_reactions = [('N1', -2.5),
('N2', -2.5)]
for node_name, rxn in correct_reactions:
with self.subTest(node=node_name):
calculated_reaction = SimpleBeam.Nodes[node_name].RxnFY['Combo 1']
# Two decimal place accuracy requires +/-0.5% accuracy
# one decimal place requires +/-5%
self.assertAlmostEqual(calculated_reaction/rxn, 1.0, 2)
def test_Kassimali_3_35(self):
"""
Tests against Kassimali example 3.35.
This example was selected because it allows us to check the following features:
1. Member loads aligned in global directions.
2. A member internal hinge.
3. A point load at the end of a member.
The example will be run in the XZ plane to change things up a bit.
"""
frame = FEModel3D()
frame.add_node('A', 0, 0, 0)
frame.add_node('B', 0, 0, 24)
frame.add_node('C', 12, 0, 0)
frame.add_node('D', 12, 0, 24)
frame.add_node('E', 24, 0, 12)
E = 29000*12**2
G = 11200*12**2
Iy = 17.3/12**4
Iz = 204/12**4
J = 0.3/12**4
A = 7.65/12**2
frame.add_member('AC', 'A', 'C', E, G, Iy, Iz, J, A)
frame.add_member('BD', 'B', 'D', E, G, Iy, Iz, J, A)
frame.add_member('CE', 'C', 'E', E, G, Iy, Iz, J, A)
frame.add_member('ED', 'E', 'D', E, G, Iy, Iz, J, A)
frame.def_support('A', support_DX=True, support_DY=True, support_DZ=True)
frame.def_support('B', support_DX=True, support_DY=True, support_DZ=True)
frame.def_support('E', support_DY=True)
frame.def_releases('CE', Rzj=True)
frame.add_member_pt_load('AC', 'FZ', 20, 12)
frame.add_member_dist_load('CE', 'FX', -1.5, -1.5)
frame.add_member_dist_load('ED', 'FX', -1.5, -1.5)
# from PyNite.Visualization import render_model
# render_model(frame, text_height=0.5, case='Case 1')
frame.analyze()
AZ = -8.63
AX = 15.46
BZ = -11.37
BX = 35.45
# The reactions were compared manually to Kassimali's solution and the shears were within
# 10% and 7% respectively. That seems like it's a little big to be a rounding error alone.
# Likely the finite element method is a little more accurate than the simplified method
# Kassimali uses.
self.assertLess(abs(frame.Nodes['A'].RxnFZ['Combo 1']/AZ - 1), 0.1)
self.assertLess(abs(frame.Nodes['A'].RxnFX['Combo 1']/AX - 1), 0.05)
        self.assertLess(abs(frame.Nodes['B'].RxnFZ['Combo 1']/BZ - 1), 0.07)
self.assertLess(abs(frame.Nodes['B'].RxnFX['Combo 1']/BX - 1), 0.05) | 2.765625 | 3 |
src/__init__.py | iki-taichi/tf-keras-transformer | 5 | 12791434 | # coding:utf-8
#from .custom_callbacks import *
| 1.09375 | 1 |
cycada/data/surreal.py | AdityaAS/cycada | 1 | 12791435 | <reponame>AdityaAS/cycada<filename>cycada/data/surreal.py
import numpy as np
import scipy.io
import torch
import os
from torch.utils.data import Dataset
from glob import glob
from os.path import join, exists
import json
from cycada.data.data_loader import register_data_params, register_dataset_obj
from cycada.data.data_loader import DatasetParams
import cv2
from cycada.data.util import convert_image_by_pixformat_normalize
import multiprocessing as mp
from joblib import Parallel, delayed
@register_data_params('surreal')
class SurrealParams(DatasetParams):
num_channels = 3
image_size = 256
mean = 0.5
num_cls = 2
fraction = 1.0
target_transform = None
black = False
def __init__(self, name):
config = None
print("PARAM: {}".format(os.getcwd()))
with open(join("dataset_configs", name+".json"), 'r') as f:
config = json.load(f)
self.num_channels = config["num_channels"]
self.image_size = config["image_size"]
self.mean = config["mean"]
self.num_cls = config["num_cls"]
self.fraction = config["fraction"]
self.target_transform = config["target_transform"]
self.black = config["black"]
@register_dataset_obj('surreal')
class SurrealLoader(Dataset):
# root must be /scratch/users/aditya/adult/SURREAL/surreal/download/SURREAL/data/cmu
def __init__(self, name, root, params, num_cls=2, split='train', remap_labels=True,
transform=None, target_transform=None):
self.root = root
self.split = split
self.remap_labels = remap_labels
self.name = name
self.runs = ['run0']
self.transform = transform
self.images = []
self.segmasks = []
self.target_transform = target_transform
self.data_path = join(self.root, self.split)
self.num_cls = num_cls
self.size = (params.image_size, params.image_size)
self.bw_flag = params.black
self.seed = 255
self.fraction = params.fraction if (self.split == 'train') else 1.0
self.collect_ids()
def get_subject_data(self, subjectpath):
imagepath = join(subjectpath, 'images')
imagesubjects = glob(join(imagepath, '*'))
images = []
segmasks = []
for imagesubject in imagesubjects:
images = images + sorted(glob(join(imagesubject, '*')))
segmasks = segmasks + sorted(glob(join(imagesubject.replace('images', 'segmasks'), '*')))
return [images, segmasks]
def collect_ids(self):
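        # Walk every run directory and gather image/segmask paths, parallelized one job per subject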
from timeit import default_timer as timer
from datetime import timedelta
# Parallelize the for loop
for run in self.runs:
runpath = join(self.data_path, run)
subjects = sorted(glob(join(runpath, '*')))
start = timer()
results = Parallel(n_jobs=mp.cpu_count())(delayed(self.get_subject_data)(subject) for subject in subjects)
end = timer()
print(timedelta(seconds=end-start))
for result in results:
self.images = self.images + result[0]
self.segmasks = self.segmasks + result[1]
def img_path(self, index):
return self.images[index]
def label_path(self, index):
return self.segmasks[index]
def __iter__(self):
return self
'''
Input: Index of image to return
Output:
Image in the format NCHW - normalized
Segmask in the format NHW (channels = 1 is understood) - not normalized because they are class labels
'''
def __getitem__(self, index):
img_path = self.img_path(index)
label_path = self.label_path(index)
img = None
if self.bw_flag:
img = cv2.imread(img_path, 0)
img_temp = np.expand_dims(img, axis = 2)
img = np.concatenate((img_temp, img_temp, img_temp), axis=2)
else:
img = cv2.imread(img_path)
target = cv2.imread(label_path)
img = cv2.resize(img, self.size)
target = cv2.resize(target, self.size)
# Convert to NCHW format and normalize to -1 to 1
# WARNING: Original code did mean normalization, we did min max normalization. Change if necessary to old one.
img = torch.Tensor(convert_image_by_pixformat_normalize(img))
#WARNING: target must be made up of 0s and 1s only!
target = torch.Tensor(target.transpose(2, 0, 1)).mean(dim=0)
return img, target
def __len__(self):
return len(self.images)
| 2.265625 | 2 |
bus_arrival.py | luaneyed/bus | 1 | 12791436 | from BusArrivalItem import BusArrivalItem
from api import call
# bus_arrival_item = BusArrivalItem(xml_root.find('msgBody').find('busArrivalItem'))
# print(bus_arrival_item)
def fetch(station_id: str, route_id: str):
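    """Fetch bus arrival items for the given station/route and render each as text."""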
response = call(
'busarrivalservice',
{
'stationId': station_id,
'routeId': route_id
}
)
if response is None:
return None
return ''.join(
map(
lambda list_element: str(BusArrivalItem(list_element)),
response
)
)
if __name__ == '__main__':
print(fetch('218000952', '241449005'))
| 2.859375 | 3 |
build/lib/SOMsHelpers.py | msc-acse/acse-9-independent-research-project-wafflescore | 2 | 12791437 | """
Author: <NAME>
GitHub: wafflescore
"""
from minisom import MiniSom, asymptotic_decay
import numpy as np
import matplotlib.pyplot as plt
import itertools
from skimage import measure
from skimage.segmentation import random_walker
from skimage import filters
from scipy.spatial import distance
from collections import Counter
from timeit import default_timer as timer
import random
from acse_9_irp_wafflescore import MiscHelpers as mh
import logging
import sys
logging.basicConfig(format='%(asctime)s | %(levelname)s : %(message)s',
level=logging.INFO, stream=sys.stdout)
def compute_dim(num_sample):
"""
Compute a default dimension of the SOMs.
This function returns the dimension size of the SOMs.
The size returned is sqrt(5 * sqrt(num_sample)), with the exception
that the minimum dimension size = 10
Parameters
----------
num_sample : int
Total number of data points that will populate the SOMs
Returns
-------
int
Ideal dimension.
"""
dim = 5 * np.sqrt(num_sample)
dim = np.int(np.sqrt(dim))
if dim < 10:
return 10
else:
return dim
def som_assemble(in_data, seed, dim, lr=0.5, sigma=2.5):
"""Initialize the SOMs model for training
Parameters
----------
in_data : np.array or list
data matrix
seed : integer
random seed for reproducibility
dim : int
dimension of the SOMs distance matrix
lr : float, optional
learning rate, by default 0.5
sigma : float, optional
spread of the neighborhood function, by default 2.5
Returns
-------
MiniSom
an object of Minisom class, see minisom.py for further details
"""
# Initialization som and weights
num_features = np.shape(in_data)[1]
som = MiniSom(dim, dim, num_features, sigma=sigma, learning_rate=lr,
neighborhood_function='gaussian', random_seed=seed)
som.pca_weights_init(in_data)
return som
def plot_som(som, in_data, label, save=False, save_name='temp'):
"""plots the distance map / u-matrix of the SOMs along with the label
Parameters
----------
som : MiniSom
trained Minisom object
in_data : np.array or list
data matrix
label : np.array or list
the true label of each data point
save : bool, optional
flag, by default False
save_name : str, optional
the name which will be used to save the plot as png file,
by default 'temp'
"""
plt.figure(figsize=(9, 7))
# Plotting the response for each litho-class
plt.pcolor(som.distance_map().T, cmap='bone_r')
# plotting the distance map as background
plt.colorbar()
for t, xx in zip(label, in_data):
w = som.winner(xx) # getting the winner
# palce a marker on the winning position for the sample xx
plt.text(w[0]+.5, w[1]+.5, str(t),
color=plt.cm.rainbow(t/10.))
plt.axis([0, som.get_weights().shape[0], 0, som.get_weights().shape[1]])
if(save):
save_dir = 'SOMs_results/' + save_name + '_plot.png'
plt.savefig(save_dir)
print('Plot saved at:', save_dir)
plt.show()
def save_som_report(som, save_name, it, et, report=None):
param_vals = str(save_name) + '\n---' + \
'\niterations,' + str(it) + \
'\nelapsed time,' + str(et) + '\n\n'
# save report to file
fdir = save_name + '_report.csv'
print('Report saved at', fdir)
mode = 'w'
f1 = open(fdir, mode)
f1.write(param_vals)
if(report):
f1.write(str(report))
f1.write('\n\n--------------------\n\n')
f1.close()
print('Report saved at:', fdir)
def histedges_equalN(in_data, nbin=10):
"""generates a histogram where each bin will contain the same number of
data points
Parameters
----------
in_data : np.array or list
data array
nbin : int
number of bins to populate, by default 10
Returns
-------
np.array
numpy array of all the histogram bins
"""
ttl_dtp = len(in_data)
return np.interp(np.linspace(0, ttl_dtp, nbin + 1),
np.arange(ttl_dtp),
np.sort(in_data))
def plot_u_matrix(som_u_mat):
"""Plots the distance map / u-matrix of the SOMs
Parameters
----------
som : MiniSom
trained Minisom object
Returns
-------
np.array
numpy array of all the histogram bins
"""
f_image = som_u_mat.flatten()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
fig.show()
ax1.pcolor(som_u_mat, cmap='bone_r')
hist = plt.hist(f_image, histedges_equalN(f_image, 10), density=True)
return hist[1]
def gen_e_model(n_map, som_label):
"""generates the Earth model from neuron map"""
som_class = []
for i in range(len(n_map)):
som_class.append(som_label[n_map[i][0]][n_map[i][1]])
return np.array(som_class)
def closest_n(value):
"""Assign cluster number to the mask's border indexes by using the
closest neighbor's value
Parameters
----------
value : np.array
numpy array of the cluster number, noted that the borders are marked
with 0
Returns
-------
np.array
new label with all the border index populated
"""
borders = np.array(np.where(value == 0)).T
new_label = np.array(value)
vals = np.where(value != 0)
vals = np.array(vals).T
for b in borders:
# find index of the closest value
c_idx = distance.cdist([b], vals).argmin()
new_label[b[0], b[1]] = value[vals[c_idx, 0]][vals[c_idx, 1]]
return new_label
def KNN(value, k=5, border_val=0):
"""Assign cluster number to the mask's border indexes by using the
K-nearest neighbor method
Parameters
----------
value : np.array
numpy array of the cluster number, noted that the borders are marked
with 0
k : int, optional
number of neighbor to consider, by default 5
Returns
-------
np.array
new label with all the border index populated
"""
borders = np.array(np.where(value == border_val)).T
new_label = np.array(value)
vals = np.where(value != 0)
    if(len(vals[0]) < k):
        logging.info("Not enough labeled neighbors to perform KNN. "
                     "Returning the original input value.")
return value
vals = np.array(vals).T
for b in borders:
# find index of the closest k neighbors
dist = distance.cdist([b], vals)
c_idx = np.argpartition(dist, k)
c_idx = c_idx[0, :k]
mins_idx = np.array(list(zip(vals[c_idx, 0], vals[c_idx, 1])))
class_counter = Counter()
for idx in mins_idx:
class_counter[value[idx[0], idx[1]]] += 1
cl = class_counter.most_common(1)[0][0]
new_label[b[0], b[1]] = cl
return new_label
def watershed_level(image, bins, border_width=0.1, plot=False, conn=None):
    """Compute and classify the SOM's u-matrix or total gradient using
    the watershed classification method
    Parameters
    ----------
    image : np.array
        u-matrix or total gradient of the SOMs
    bins : np.array
        numpy array of all the histogram bins
    plot : bool, optional
        flag whether to plot the watershed level or not, by default False
    conn : int, optional
        connectivity flag for measure.label, by default None
    Returns
    -------
    np.array
        numpy array of predicted cluster labels from each watershed level
    """
    num_bins = len(bins)
ncols = 6
if(plot):
fig, axes = plt.subplots(ncols=ncols, nrows=num_bins,
figsize=(12, num_bins*3),
sharex=True, sharey=True)
ax = axes.ravel()
ws_labels = np.zeros((num_bins * ncols, image.shape[0], image.shape[1]))
for i in range(num_bins):
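        # For each bin, build two masks (an adaptive local threshold and a global
        # threshold at bins[i]) and label each with three fill strategies
        # (closest neighbor, KNN, random walker): six candidate segmentations per bin.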
val = filters.threshold_local(image, block_size=3 + 2*i)
block_mask = (image < val)
markers = measure.label(block_mask, connectivity=conn)
ws_labels[i*ncols] = closest_n(markers) - 1
ws_labels[i*ncols + 1] = KNN(markers) - 1
ws_labels[i*ncols + 2] = random_walker(image, markers)
if(plot):
ax[i*ncols].imshow(ws_labels[i*ncols + 0], origin='lower')
ax[i*ncols].title.set_text('b_cn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 0]))))
ax[i*ncols + 1].imshow(ws_labels[i*ncols + 1], origin='lower')
ax[i*ncols + 1].title.set_text('b_knn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 1]))))
ax[i*ncols + 2].imshow(ws_labels[i*ncols + 2], origin='lower')
ax[i*ncols + 2].title.set_text('b_rw: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 2]))))
thres_mask = (image <= bins[i])
markers = measure.label(thres_mask, connectivity=conn)
ws_labels[i*ncols + 3] = closest_n(markers) - 1
ws_labels[i*ncols + 4] = KNN(markers) - 1
ws_labels[i*ncols + 5] = random_walker(image, markers)
if(plot):
ax[i*ncols + 3].imshow(ws_labels[i*ncols + 3], origin='lower')
ax[i*ncols + 3].title.set_text('b_cn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 3]))))
ax[i*ncols + 4].imshow(ws_labels[i*ncols + 4], origin='lower')
ax[i*ncols + 4].title.set_text('b_knn: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 4]))))
ax[i*ncols + 5].imshow(ws_labels[i*ncols + 5], origin='lower')
ax[i*ncols + 5].title.set_text('b_rw: it={} n_class={}'.format(i,
len(np.unique(ws_labels[i*ncols + 5]))))
return ws_labels
def eval_ws(in_data, ws_labels, n_map, label=None, re_all=False):
"""Evaluate and return the best watershed prediction result
Parameters
----------
in_data : np.array or list
data matrix
ws_labels : np.array
predicted cluster labels from watershed segmentation
n_map : np.array
array of the winner neuron
label : np.array or list, optional
the true label of each data point
Returns
-------
np.array
list of best watershed labels, may contain more than one set
"""
len_watershed = ws_labels.shape[0]
cluster_labels = np.zeros((len_watershed, len(in_data)))
avg_sils = np.full(len_watershed, np.nan)
ch_scs = np.full(len_watershed, np.nan)
if(label is not None):
avg_ents = np.full(len_watershed, np.nan)
avg_purs = np.full(len_watershed, np.nan)
for i in range(len_watershed):
param = {'watershed idx': i}
if(len(np.unique(ws_labels[i])) > 1):
cluster_labels[i] = gen_e_model(n_map, ws_labels[i])
avg_sils[i] = mh.int_eval_silhouette(in_data, cluster_labels[i],
method='som_watershed',
param=param)
try:
ch_scs[i] = mh.cal_har_sc(in_data, cluster_labels[i])
            except Exception:
ch_scs[i] = -1
if(label is not None):
avg_ents[i], avg_purs[i] = mh.ext_eval_entropy(label,
cluster_labels[i])
best_idx = []
best_idx.append(np.nanargmax(np.array(avg_sils))) # closest to 1
best_idx.append(np.nanargmax(ch_scs)) # higher = better
if(label is not None):
best_idx.append(np.nanargmin(np.array(avg_ents))) # closest to 0
best_idx.append(np.nanargmax(np.array(avg_purs))) # closest to 1
best_idx = np.unique(best_idx)
if(re_all):
return (cluster_labels, avg_sils,
ch_scs, best_idx)
else:
return (cluster_labels[best_idx], avg_sils[best_idx],
ch_scs[best_idx])
def run_SOMs(in_data, dim, iter_cnt, lr, sigma, seed=10):
"""Method to fully run SOMs
Parameters
----------
in_data : np.array or list
data matrix
dim : int
dimension of the SOMs distance matrix
iter_cnt : integer
number of iterations for SOMs to perform
lr : float
learning rate
sigma : float
spread of the neighborhood function, by default 2.5dim : int
seed : integer, optional
random seed for reproducibility, by default 10
Returns
-------
minisom
minisom object
np.array
cluster label
"""
som = som_assemble(in_data, seed, dim, lr, sigma)
som.train_random(in_data, iter_cnt, verbose=False)
u_matrix = som.distance_map().T
watershed_bins = histedges_equalN(u_matrix.flatten())
ws_labels = watershed_level(u_matrix, watershed_bins)
n_map = som.neuron_map(in_data)
cluster_labels, _, _ = eval_ws(in_data, ws_labels, n_map)
return som, cluster_labels
def gen_param_grid(init_guess):
g_dim, g_it, g_lr, g_sigma = init_guess
    # search +/-10 around the guessed dimension, floored at sensible minimums
    min_dim = max(g_dim - 10, 10)
    max_dim = max(g_dim + 10, 20)
param_grid = {
'dim': list(range(min_dim, max_dim+1)),
'iter_cnt': list(range(g_it - 500, g_it + 500, 200)),
'learning_rate': list(np.logspace(np.log10(0.25), np.log10(0.75),
base=10, num=100)),
'sigma': list(np.linspace(g_sigma-1, g_sigma+1, num=30)),
}
return param_grid
def random_search_som(in_data, init_guess, max_eval=20, label=None, seed=10,
re_all=False):
"""perform random search for SOMs best parameters.
Parameters
----------
in_data : np.array or list
data matrix
init_guess : tuple
list of initial guess of the parameters, in order of dimension,
number of iterations, learning rate, and sigma
max_eval : int, optional
number of max iterartion to perform the search, by default 20
label : np.array or list, optional
the true label of each data point, by default None
seed : integer, optional
random seed for reproducibility, by default 10
Returns
-------
All cluster label and its counterpart parameters.
"""
random.seed(seed)
param_grid = gen_param_grid(init_guess)
dims = np.zeros(max_eval)
iters = np.zeros(max_eval)
lrs = np.zeros(max_eval)
sigmas = np.zeros(max_eval)
avg_sils = np.full(max_eval, np.nan)
ch_scs = np.full(max_eval, np.nan)
cluster_labels = np.zeros((max_eval, len(in_data)))
if(label is not None):
avg_ents = np.full(max_eval, np.nan)
avg_purs = np.full(max_eval, np.nan)
i = 0
while i < max_eval:
random_params = {k: random.sample(v, 1)[0]
for k, v in param_grid.items()}
dims[i], iters[i], lrs[i], sigmas[i] = list(random_params.values())
som = som_assemble(in_data, seed, int(dims[i]), lr=lrs[i], sigma=sigmas[i])
som.train_random(in_data, int(iters[i]), verbose=False)
u_matrix = som.distance_map().T
watershed_bins = histedges_equalN(u_matrix.flatten())
ws_labels = watershed_level(u_matrix, watershed_bins)
n_map = som.neuron_map(in_data)
_c, _as, _ch = eval_ws(in_data, ws_labels, n_map)
cluster_labels[i], avg_sils[i], ch_scs[i] = _c[0], _as[0], _ch[0]
n_clusters = len(np.unique(cluster_labels[i]))
if(n_clusters < 5 or n_clusters > 30):
logging.info("Random search using dim=%d, iter=%d, lr=%.6f, sigma=%.6f\
result to very small / large number of clusters (n_clusters = %d)\
" % (dims[i], iters[i], lrs[i], sigmas[i], n_clusters))
continue
logging.info("dim=%d, iter=%d, lr=%.6f, sigma=%.6f, sil=%.6f, ch=%.6f" % (dims[i], iters[i], lrs[i], sigmas[i], avg_sils[i], ch_scs[i]))
if(label is not None):
avg_ents[i], avg_purs[i] = mh.ext_eval_entropy(label, cluster_labels[i], init_clus=-1)
logging.info("ent=%.6f, pur=%.6f" % (avg_ents[i], avg_purs[i]))
i += 1
best_idx = []
best_idx.append(np.nanargmax(np.array(avg_sils))) # closest to 1
best_idx.append(np.nanargmax(ch_scs)) # higher = better
if(label is not None):
best_idx.append(np.nanargmin(np.array(avg_ents))) # closest to 0
best_idx.append(np.nanargmax(np.array(avg_purs))) # closest to 1
best_idx = np.unique(best_idx)
if(re_all):
return (cluster_labels, avg_sils,
ch_scs, dims, iters, lrs, sigmas, best_idx)
else:
return (cluster_labels[best_idx], avg_sils[best_idx],
ch_scs[best_idx], dims[best_idx], iters[best_idx],
lrs[best_idx], sigmas[best_idx])
| 2.71875 | 3 |
wapkg/remote.py | chickentuna/wapkg | 2 | 12791438 | import json
from urllib.request import urlopen
from urllib.error import URLError
from urllib.parse import urljoin
VERSION_REQUIRED = 3
EXTERNAL_LIST = 'https://pastebin.com/raw/aKjmATab'
# Returns repo index dictionary object, or None in case of failure
def fetch_index(repo_url):
try:
with urlopen(urljoin(repo_url, 'index.json')) as index_req:
index = json.loads(index_req.read().decode('utf-8'))
except URLError:
return None
if 'repo' not in index or not index['repo'] == 'wapkg':
return None
if not index['version'] == VERSION_REQUIRED:
if index['version'] > VERSION_REQUIRED:
print("! Source '" + repo_url + "' requires newer version of wapkg, " +
'consider upgrading your software in order to use this repo.')
return None
return index
def fetch_external_sources():
sources = []
try:
with urlopen(EXTERNAL_LIST) as lst_req:
for src in lst_req.read().decode('utf-8').split('\n'):
src_ = src.strip()
if len(src_) and not src_.startswith('#'):
sources.append(src_)
except URLError:
pass
return sources
# Unwraps the 'switch' content
def select_pkg(pkg, vs):
if not pkg:
return None
if 'switch' in pkg:
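        # A 'switch' maps comma-separated version strings (or the '*' wildcard)
        # to the package entry to use for that version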
if not vs:
return None
switch = pkg['switch']
for v in switch:
if vs in v.split(','):
return switch[v]
if '*' in switch:
return switch['*']
return None
return pkg
# Returns True if package and all it's dependencies can be successfully installed
def trace_pkg_deps(pkgs_bundle, vs, name):
pkg = None
for pkgs in pkgs_bundle:
if name in pkgs:
pkg = pkgs[name]
break
pkg = select_pkg(pkg, vs)
if not pkg:
return False
if 'requirements' in pkg:
for req in pkg['requirements']:
if not trace_pkg_deps(pkgs_bundle, vs, req):
return False
return True
| 3.171875 | 3 |
min_win_substr.py | skokal01/Interview-Practice | 0 | 12791439 | <filename>min_win_substr.py
# https://discuss.leetcode.com/topic/30941/here-is-a-10-line-template-that-can-solve-most-substring-problems/12
#1. Use two pointers: start and end to represent a window.
#2. Move end to find a valid window.
#3. When a valid window is found, move start to find a smaller window.
from collections import defaultdict
from sys import maxint
def findSubString(s, pat):
    MAX_INT = maxint
    start = end = 0
    char_need = defaultdict(int) # count of each char the current window still needs; negative means the window holds a surplus
    count_need = len(pat) # count of chars in pat that are not yet in the current window
    min_length = MAX_INT
    min_start = 0
    for i in pat:
        # the window initially needs every char of pat
        char_need[i] += 1
    while end < len(s):
        if char_need[s[end]] > 0:
            count_need -= 1
        # current window contains s[end] now, so does not need it any more
        char_need[s[end]] -= 1
        end += 1
        while count_need == 0:
            if min_length > end - start:
                min_length = end - start
                min_start = start
            # current window does not contain s[start] any more
            char_need[s[start]] += 1
            # when some count in char_need is positive, it means
            # a char of pat is missing from the current window
            if char_need[s[start]] > 0:
                count_need += 1
            start += 1
    return "" if min_length == MAX_INT else s[min_start:min_start + min_length]
print findSubString("ADOBECODEBANC", "ABC")
| 3.6875 | 4 |
translator/tasks.py | gsi-upm/eurosentiment-translator | 1 | 12791440 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import time
import traceback
from factory import create_celery_app
from .models import *
from .utils import translate_document
from datetime import timedelta, datetime
from StringIO import StringIO
celery = create_celery_app().celery
logger = celery.logger
@celery.task()
def process_request(tid):
logger.warning("TR id: {}".format(tid))
tr = TranslationRequest.objects.get(id=tid)
try:
tr.start()
if tr.infile:
infile = tr.infile
else:
infile = StringIO(tr.input)
template = tr.template.text
out = translate_document(infile=infile.get(),
template=template,
template_data=tr.to_mongo())
tr.outfile.delete()
tr.outfile.new_file(encoding="utf-8")
for chunk in out:
tr.outfile.write(chunk)
tr.outfile.close()
tr.save()
tr.status = TranslationRequest.SUCCESS
tr.finish()
logger.warning("Processed")
return tr
    except Exception as ex:
        tr.status = TranslationRequest.ERROR
        tr.message = "{} -- {}".format(ex, traceback.format_exc())
        tr.finish()
        raise
@celery.task()
def clean_files():
logger.warning("Cleaning files")
olds = TranslationRequest.objects(infile__ne=None,
finished__lte=(datetime.now()-timedelta(days=1)))
logger.warning("Old files: {}".format(olds))
for req in olds:
req.clean_files()
logger.warning("Cleaned")
| 2.171875 | 2 |
celery_app/ipviews.py | tiaotiaolong/piu | 2 | 12791441 | from flask import Blueprint,request
from app import pa_domain,pa_ip
from .tasks import scan_ip_task
from celery_app.utils.utils import get_current_time,insert_taskid_db
ipscan_blueprint = Blueprint("ipscan", __name__, url_prefix='/ipscan')
# Scan all IPs under a given top-level domain
@ipscan_blueprint.route('/scan')
def scan_ip():
    domain = request.args.get("domain")
    # Look up this domain's record in the database
    domain_index = pa_domain.find_one({"domain": domain})
    if domain_index:
        ip_list = []
        # Gather every IP mapped to the domain's subdomains
        for item in domain_index['subdomain']:
            for ip_s in item['ip']:
                ip_list.append(ip_s)
        # De-duplicate the IP list
        ip_list = list(set(ip_list))
        # Dispatch the scan_ip task with the main domain and its IP list
        r = scan_ip_task.delay(domain, ip_list)
        # Persist the task id
        insert_taskid_db({"task_id":r.task_id,"add_time":get_current_time(),"task_type":"ip_scan","ip_list":ip_list,"task_info":"对{0}域名下的{1}等{2}个ip进行端口扫描".format(domain,ip_list[0],len(ip_list))})
return {"code":200,"msg":"添加扫描任务成功"}
return {"code":201,"msg":"未找到该域名所对应ip"}
# Return the total number of IPs in the database
@ipscan_blueprint.route('/getipnum')
def get_ip_num():
return {"ip_num":pa_ip.find({}).count()}
# List IPs: 'index' is the starting position, 'offset' is the number of records to return
@ipscan_blueprint.route('/getiplist')
def get_ip_list():
result = []
tmp = {}
domain_index = int(request.args.get("index"))
domain_offset = int(request.args.get("offset"))
cursor = pa_ip.find().sort([('_id', -1)]).skip(domain_index).limit(domain_offset)
for document in cursor:
tmp['ip'] = document['ip']
tmp['add_time'] = document['add_time']
tmp['port'] = document['port']
result.append(tmp)
tmp = {}
return {"ip_list": result}
| 2.484375 | 2 |
zhuangzhuangml/api/state.py | Alpaca-Hub/zhuangzhuangml | 2 | 12791442 | from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import IPythonHandler
import os, json, git, urllib, requests
from git import Repo, GitCommandError
from subprocess import check_output
import subprocess
repo = None
htable = []
config = {
"GIT_USER": "alpaca",
"GIT_PARENT_DIR": os.path.expanduser("~/Desktop/jupyter_versioning"),
"GIT_BRANCH_NAME": "main",
# "GIT_REMOTE_URL" : "alpaca",
# "GIT_REMOTE_UPSTREAM": "alpaca",
# "GITHUB_ACCESS_TOKEN": "<PASSWORD>"
}
# def delete_cell():
# if cell in htable:
# del htable[cell]
# return True
# return False
# def register_cell(cell, content):
# filename = str(config['GIT_PARENT_DIR'] + "/" + os.environ.get('GIT_REPO_NAME') + str(cell) + filename.replace('ipynb', 'txt'))
# subprocess.run(['cat', content, '>', filename])
# print(repo.git.add(filename))
# print(repo.git.commit( a=False, m="\nUpdated {}".format(filename) ))
| 2.125 | 2 |
rolz_bot/database.py | Reriiru/rolz_org_to_discord | 1 | 12791443 | from pymongo import MongoClient
from settings import MONGO_URL
client = MongoClient(MONGO_URL)
db = client.rolz_database
| 1.609375 | 2 |
naturalnum.py | gusenov/code-stepik-org-entrance-exam | 1 | 12791444 | <reponame>gusenov/code-stepik-org-entrance-exam<gh_stars>1-10
def expr(x):
    a = x / (x - 2018)
    b = (x - 500) / (x - 2500)
    c = a - b
    return c < 0
cnt = 0
# The inequality simplifies to (18*x - 1009000) / ((x - 2018)*(x - 2500)) < 0,
# which cannot hold once x > 1009000/18 ~= 56056, so a finite upper bound
# lets the loop terminate while still covering every solution.
for n in range(1, 60000):
# print("n = %d" % n)
if (n == 2018) or (n == 2500):
continue
if expr(n):
cnt += 1
print("cnt = %d" % cnt)
| 3.234375 | 3 |
src/covid_model_seiir_pipeline/pipeline/regression/model/containers.py | yukgu/covid-model-seiir-pipeline | 0 | 12791445 | <gh_stars>0
"""Containers for regression data."""
from dataclasses import dataclass
from typing import Dict, List, Iterator, Tuple, Union
import pandas as pd
from covid_model_seiir_pipeline.lib import (
utilities,
)
@dataclass
class RatioData:
infection_to_death: int
infection_to_admission: int
infection_to_case: int
ifr: pd.Series
ifr_hr: pd.Series
ifr_lr: pd.Series
ihr: pd.Series
idr: pd.Series
def to_dict(self) -> Dict[str, Union[int, pd.Series]]:
return utilities.asdict(self)
@dataclass
class HospitalCensusData:
hospital_census: pd.Series
icu_census: pd.Series
def to_dict(self) -> Dict[str, pd.Series]:
return utilities.asdict(self)
def to_df(self):
return pd.concat([v.rename(k) for k, v in self.to_dict().items()], axis=1)
@dataclass
class HospitalMetrics:
hospital_admissions: pd.Series
hospital_census: pd.Series
icu_admissions: pd.Series
icu_census: pd.Series
def to_dict(self) -> Dict[str, pd.Series]:
return utilities.asdict(self)
def to_df(self):
return pd.concat([v.rename(k) for k, v in self.to_dict().items()], axis=1)
@dataclass
class HospitalCorrectionFactors:
hospital_census: pd.Series
icu_census: pd.Series
def to_dict(self) -> Dict[str, pd.Series]:
return utilities.asdict(self)
def to_df(self):
return pd.concat([v.rename(k) for k, v in self.to_dict().items()], axis=1)
| 2.703125 | 3 |
tests/samples.py | ewerybody/svg.charts | 26 | 12791446 | """
Samples of the various charts. Run this script to generate the reference
samples.
"""
import os
from svg.charts.plot import Plot
from svg.charts import bar
from svg.charts import time_series
from svg.charts import pie
from svg.charts import schedule
from svg.charts import line
def sample_Plot():
g = Plot(
{
'min_x_value': 0,
'min_y_value': 0,
'area_fill': True,
'stagger_x_labels': True,
'stagger_y_labels': True,
'show_x_guidelines': True,
}
)
g.add_data({'data': [[1, 25], [2, 30], [3, 45]], 'title': 'series 1'})
g.add_data({'data': [[1, 30], [2, 31], [3, 40]], 'title': 'series 2'})
g.add_data({'data': [[0.5, 35], [1, 20], [3, 10.5]], 'title': 'series 3'})
return g
def sample_PlotTextLabels():
g = Plot(
{
'draw_lines_between_points': False,
'min_x_value': 0,
'min_y_value': 0,
'show_x_guidelines': True,
}
)
# Processed Apple production 2015
# Any object with a .text attribute will do;
# we like namedtuple().
from collections import namedtuple
Datum = namedtuple("Datum", "x y text")
g.add_data(
{
'data': [
Datum(8.24, 80.85, 'ES'),
Datum(0.17, 6.73, 'IE'),
Datum(0, 0, 'IS'),
],
'title': 'Processed Apple',
}
)
return g
def sample_TimeSeries():
g = time_series.Plot({})
g.timescale_divisions = '4 hours'
g.stagger_x_labels = True
g.x_label_format = '%d-%b %H:%M'
# g.max_y_value = 200
g.add_data(
{
'data': ['2005-12-21T00:00:00', 20, '2005-12-22T00:00:00', 21],
'title': 'series 1',
}
)
return g
def generate_samples():
yield 'Plot', sample_Plot()
yield 'PlotTextLabels', sample_PlotTextLabels()
yield 'TimeSeries', sample_TimeSeries()
yield 'VerticalBar', SampleBar.vertical()
yield 'HorizontalBar', SampleBar.horizontal()
yield 'VerticalBarLarge', SampleBar.vertical_large()
yield 'VerticalBarStackTop', SampleBar.vertical_top()
yield 'Pie', sample_Pie()
yield 'Schedule', sample_Schedule()
yield 'Line', sample_Line()
class SampleBar:
fields = ['Internet', 'TV', 'Newspaper', 'Magazine', 'Radio']
@classmethod
def vertical(cls):
g = bar.VerticalBar(cls.fields)
g.stack = 'side'
g.scale_integers = True
g.width, g.height = 640, 480
g.graph_title = 'Question 7'
g.show_graph_title = True
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
@classmethod
def horizontal(cls):
g = bar.HorizontalBar(cls.fields)
g.stack = 'side'
g.scale_integers = True
g.width, g.height = 640, 480
g.graph_title = 'Question 7'
g.show_graph_title = True
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
@classmethod
def vertical_large(cls):
g = bar.VerticalBar(cls.fields)
options = dict(
scale_integers=True,
stack='side',
width=640,
height=480,
graph_title='Question 8',
show_graph_title=True,
no_css=False,
)
g.__dict__.update(options)
g.add_data(dict(data=[2, 22, 98, 143, 82], title='intermediate'))
g.add_data(dict(data=[2, 26, 106, 193, 105], title='old'))
return g
@classmethod
def vertical_top(cls):
g = bar.VerticalBar(cls.fields, dict(stack='top'))
assert g.stack == 'top'
g.scale_integers = True
g.width, g.height = 640, 480
g.graph_title = 'Question 7'
g.show_graph_title = True
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
def sample_Line():
g = line.Line()
options = dict(
scale_integers=True,
area_fill=True,
width=640,
height=480,
fields=SampleBar.fields,
graph_title='Question 7',
show_graph_title=True,
no_css=False,
)
g.__dict__.update(options)
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
def sample_Pie():
g = pie.Pie({})
options = dict(
width=640,
height=480,
fields=SampleBar.fields,
graph_title='Question 7',
expand_greatest=True,
show_data_labels=True,
)
g.__dict__.update(options)
g.add_data({'data': [-2, 3, 1, 3, 1], 'title': 'Female'})
g.add_data({'data': [0, 2, 1, 5, 4], 'title': 'Male'})
return g
def sample_Schedule():
title = "Billy's Schedule"
data1 = [
"History 107",
"5/19/04",
"6/30/04",
"Algebra 011",
"6/2/04",
"8/11/04",
"Psychology 101",
"6/28/04",
"8/9/04",
"Acting 105",
"7/7/04",
"8/16/04",
]
g = schedule.Schedule(
dict(
width=640,
height=480,
graph_title=title,
show_graph_title=True,
key=False,
scale_x_integers=True,
scale_y_integers=True,
show_data_labels=True,
show_y_guidelines=False,
show_x_guidelines=True,
# show_x_title=True, # not yet implemented
x_title="Time",
show_y_title=False,
rotate_x_labels=True,
rotate_y_labels=False,
x_label_format="%m/%d",
timescale_divisions="1 week",
popup_format="%m/%d/%y",
area_fill=True,
min_y_value=0,
)
)
g.add_data(dict(data=data1, title="Data"))
return g
def save_samples():
root = os.path.dirname(__file__)
for sample_name, sample in generate_samples():
res = sample.burn()
with open(os.path.join(root, sample_name + '.py.svg'), 'w') as f:
f.write(res)
if __name__ == '__main__':
save_samples()
| 2.8125 | 3 |
SUL1/sample/data_reader_example/reader_sample.py | ddddwee1/SULT | 18 | 12791447 | import data_reader
import time
import tensorflow as tf
def worker(num):
time.sleep(0.5)
print(num)
return num
if __name__=='__main__':
data = list(range(100))
bsize = 10
reader = data_reader.data_reader(data, worker, bsize)
for i in range(10):
a = reader.get_next_batch()
print(a)
| 3 | 3 |
ariadna/__init__.py | dacabdi/ariadna | 0 | 12791448 | from .PathSplitter import PathSplitter
from .PathSplitter import RegexSplitter
__DEFAULT_PATH_SPLITTER__ = RegexSplitter
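# Splitter used when a Caja is constructed without an explicit one.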
__cajas__ = [
'Caja',
'CajaMapping',
'CajaMutableMapping',
'CajaMutableSequence',
'CajaMutableSet',
'CajaSequence',
'CajaSet'
]
from .Caja import Caja
from .CajaMapping import CajaMapping
from .CajaMutableMapping import CajaMutableMapping
from .CajaMutableSequence import CajaMutableSequence
from .CajaMutableSet import CajaMutableSet
from .CajaSequence import CajaSequence
from .CajaSet import CajaSet
__DEFAULT_NONE_CAJA__ = CajaMutableMapping
__all__ = __cajas__ + ['PathSplitter', 'RegexSplitter'] | 1.414063 | 1 |
cpdb/config/storages.py | invinst/CPDBv2_backend | 25 | 12791449 | import mimetypes
from django.core.files.base import ContentFile
from django.core.files.storage import Storage
from django.utils.deconstruct import deconstructible
from django.conf import settings
from azure.storage.blob.models import ContentSettings
from azure.storage.blob.baseblobservice import BaseBlobService
from azure.storage.blob.blockblobservice import BlockBlobService
from azure.common import AzureMissingResourceHttpError
@deconstructible
class AzureStorage(Storage):
def __init__(self, azure_container=settings.AZURE_STATICFILES_CONTAINER, *args, **kwargs):
super(AzureStorage, self).__init__(*args, **kwargs)
self.account_name = settings.AZURE_STORAGE_ACCOUNT_NAME
self.account_key = settings.AZURE_STORAGE_ACCOUNT_KEY
self.azure_container = azure_container
self.azure_ssl = settings.AZURE_STATICFILES_SSL
self._base_blob_service = None
self._block_blob_service = None
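
    # Both blob-service clients are created lazily on first access:
    # BaseBlobService covers reads, deletes and metadata; BlockBlobService
    # covers uploads (see _save).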
@property
def base_blob_service(self):
if self._base_blob_service is None:
self._base_blob_service = BaseBlobService(
self.account_name, self.account_key)
return self._base_blob_service
@property
def block_blob_service(self):
if self._block_blob_service is None:
self._block_blob_service = BlockBlobService(
self.account_name, self.account_key)
return self._block_blob_service
@property
def azure_protocol(self):
if self.azure_ssl:
return 'https'
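        # azure_ssl is tri-state: False means plain http, None means no scheme.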
return 'http' if self.azure_ssl is not None else None
def _open(self, name, mode="rb"):
blob = self.base_blob_service.get_blob_to_bytes(self.azure_container, name)
return ContentFile(blob.content)
def exists(self, name):
return self.base_blob_service.exists(self.azure_container, name)
def delete(self, name):
try:
self.base_blob_service.delete_blob(self.azure_container, name)
except AzureMissingResourceHttpError: # pragma: no cover
pass
def size(self, name):
blob = self.base_blob_service.get_blob_properties(self.azure_container, name)
return blob.properties.content_length
def _save(self, name, content):
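        # Prefer the content type declared on the uploaded file; otherwise
        # guess it from the file name extension.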
if hasattr(content.file, 'content_type'):
content_type = content.file.content_type
else:
content_type = mimetypes.guess_type(name)[0]
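        # Read the whole payload into memory before handing it to Azure;
        # Django files expose chunks(), plain file objects expose read().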
if hasattr(content, 'chunks'):
content_data = b''.join(chunk for chunk in content.chunks())
else:
content_data = content.read()
self.block_blob_service.create_blob_from_bytes(
self.azure_container, name,
content_data,
content_settings=ContentSettings(content_type=content_type))
return name
def url(self, name):
return self.base_blob_service.make_blob_url(
container_name=self.azure_container,
blob_name=name,
protocol=self.azure_protocol,
)
def get_modified_time(self, name):
blob = self.base_blob_service.get_blob_properties(
self.azure_container,
name
)
return blob.properties.last_modified
| 1.960938 | 2 |
test/SentimentAnalyzerTest.py | fractalbass/bayesian_trump | 0 | 12791450 | #--------------------------------------------------------------
# By <NAME>
# Painted Harmony Group, Inc
# June 26, 2017
# Please See LICENSE.txt
#--------------------------------------------------------------
import unittest
import SentimentAnalyzer as analyzer
class SentimentAnalyzerTest(unittest.TestCase):
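    """Smoke tests: clearly positive text should classify as "pos" and
    clearly negative text as "neg"."""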
def test_analyze_sentiment(self):
sa = analyzer.SentimentAnalyzer()
        self.assertEqual(sa.analyze_sentiment("This is a happy tweet. Have a nice day."), "pos")
        self.assertEqual(sa.analyze_sentiment("I am angry. He is very dishonest. Sad."), "neg")
| 2.78125 | 3 |