Dataset schema:

  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 classes)
  text              string (75-19.8k chars)
  language          string (1 class)
  meta_information  dict
q200
_nth
train
def _nth(arr, n):
    """
    Return the nth value of array

    If it is missing return NaN
    """
    try:
        return arr.iloc[n]
    except (KeyError, IndexError):
        return np.nan
python
{ "resource": "" }
q201
make_time
train
def make_time(h=0, m=0, s=0, ms=0, frames=None, fps=None):
    """
    Convert time to milliseconds.

    See :func:`pysubs2.time.times_to_ms()`. When both frames and fps are
    specified, :func:`pysubs2.time.frames_to_ms()` is called instead.

    Raises:
        ValueError: Invalid fps, or one of frames/fps is missing.

    Example:
        >>> make_time(s=1.5)
        1500
        >>> make_time(frames=50, fps=25)
        2000

    """
    if frames is None and fps is None:
        return times_to_ms(h, m, s, ms)
    elif frames is not None and fps is not None:
        return frames_to_ms(frames, fps)
    else:
        raise ValueError("Both fps and frames must be specified")
python
{ "resource": "" }
q202
SSAEvent.shift
train
def shift(self, h=0, m=0, s=0, ms=0, frames=None, fps=None):
    """
    Shift start and end times.

    See :meth:`SSAFile.shift()` for full description.

    """
    delta = make_time(h=h, m=m, s=s, ms=ms, frames=frames, fps=fps)
    self.start += delta
    self.end += delta
python
{ "resource": "" }
q203
SSAEvent.equals
train
def equals(self, other):
    """Field-based equality for SSAEvents."""
    if isinstance(other, SSAEvent):
        return self.as_dict() == other.as_dict()
    else:
        raise TypeError("Cannot compare to non-SSAEvent object")
python
{ "resource": "" }
q204
SSAFile.load
train
def load(cls, path, encoding="utf-8", format_=None, fps=None, **kwargs):
    """
    Load subtitle file from given path.

    Arguments:
        path (str): Path to subtitle file.
        encoding (str): Character encoding of input file.
            Defaults to UTF-8, you may need to change this.
        format_ (str): Optional, forces use of specific parser
            (eg. `"srt"`, `"ass"`). Otherwise, format is detected
            automatically from file contents. This argument should
            be rarely needed.
        fps (float): Framerate for frame-based formats (MicroDVD),
            for other formats this argument is ignored. Framerate might
            be detected from the file, in which case you don't need
            to specify it here (when given, this argument overrides
            autodetection).
        kwargs: Extra options for the parser.

    Returns:
        SSAFile

    Raises:
        IOError
        UnicodeDecodeError
        pysubs2.exceptions.UnknownFPSError
        pysubs2.exceptions.UnknownFormatIdentifierError
        pysubs2.exceptions.FormatAutodetectionError

    Note:
        pysubs2 may autodetect subtitle format and/or framerate. These
        values are set as :attr:`SSAFile.format` and :attr:`SSAFile.fps`
        attributes.

    Example:
        >>> subs1 = pysubs2.load("subrip-subtitles.srt")
        >>> subs2 = pysubs2.load("microdvd-subtitles.sub", fps=23.976)

    """
    with open(path, encoding=encoding) as fp:
        return cls.from_file(fp, format_, fps=fps, **kwargs)
python
{ "resource": "" }
q205
SSAFile.from_string
train
def from_string(cls, string, format_=None, fps=None, **kwargs):
    """
    Load subtitle file from string.

    See :meth:`SSAFile.load()` for full description.

    Arguments:
        string (str): Subtitle file in a string. Note that the string
            must be Unicode (in Python 2).

    Returns:
        SSAFile

    Example:
        >>> text = '''
        ... 1
        ... 00:00:00,000 --> 00:00:05,000
        ... An example SubRip file.
        ... '''
        >>> subs = SSAFile.from_string(text)

    """
    fp = io.StringIO(string)
    return cls.from_file(fp, format_, fps=fps, **kwargs)
python
{ "resource": "" }
q206
SSAFile.from_file
train
def from_file(cls, fp, format_=None, fps=None, **kwargs):
    """
    Read subtitle file from file object.

    See :meth:`SSAFile.load()` for full description.

    Note:
        This is a low-level method. Usually, one of :meth:`SSAFile.load()`
        or :meth:`SSAFile.from_string()` is preferable.

    Arguments:
        fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
            Note that the file must be opened in text mode (as opposed to binary).

    Returns:
        SSAFile

    """
    if format_ is None:
        # Autodetect subtitle format, then read again using correct parser.
        # The file might be a pipe and we need to read it twice,
        # so just buffer everything.
        text = fp.read()
        fragment = text[:10000]
        format_ = autodetect_format(fragment)
        fp = io.StringIO(text)

    impl = get_format_class(format_)
    subs = cls()  # an empty subtitle file
    subs.format = format_
    subs.fps = fps
    impl.from_file(subs, fp, format_, fps=fps, **kwargs)
    return subs
python
{ "resource": "" }
q207
SSAFile.save
train
def save(self, path, encoding="utf-8", format_=None, fps=None, **kwargs):
    """
    Save subtitle file to given path.

    Arguments:
        path (str): Path to subtitle file.
        encoding (str): Character encoding of output file.
            Defaults to UTF-8, which should be fine for most purposes.
        format_ (str): Optional, specifies desired subtitle format
            (eg. `"srt"`, `"ass"`). Otherwise, format is detected
            automatically from file extension. Thus, this argument
            is rarely needed.
        fps (float): Framerate for frame-based formats (MicroDVD),
            for other formats this argument is ignored. When omitted,
            :attr:`SSAFile.fps` value is used (ie. the framerate used
            for loading the file, if any). When the :class:`SSAFile`
            wasn't loaded from MicroDVD, or if you wish to save it with
            a different framerate, use this argument. See also
            :meth:`SSAFile.transform_framerate()` for fixing bad
            frame-based to time-based conversions.
        kwargs: Extra options for the writer.

    Raises:
        IOError
        UnicodeEncodeError
        pysubs2.exceptions.UnknownFPSError
        pysubs2.exceptions.UnknownFormatIdentifierError
        pysubs2.exceptions.UnknownFileExtensionError

    """
    if format_ is None:
        ext = os.path.splitext(path)[1].lower()
        format_ = get_format_identifier(ext)

    with open(path, "w", encoding=encoding) as fp:
        self.to_file(fp, format_, fps=fps, **kwargs)
python
{ "resource": "" }
q208
SSAFile.to_string
train
def to_string(self, format_, fps=None, **kwargs):
    """
    Get subtitle file as a string.

    See :meth:`SSAFile.save()` for full description.

    Returns:
        str

    """
    fp = io.StringIO()
    self.to_file(fp, format_, fps=fps, **kwargs)
    return fp.getvalue()
python
{ "resource": "" }
q209
SSAFile.to_file
train
def to_file(self, fp, format_, fps=None, **kwargs):
    """
    Write subtitle file to file object.

    See :meth:`SSAFile.save()` for full description.

    Note:
        This is a low-level method. Usually, one of :meth:`SSAFile.save()`
        or :meth:`SSAFile.to_string()` is preferable.

    Arguments:
        fp (file object): A file object, ie. :class:`io.TextIOBase` instance.
            Note that the file must be opened in text mode (as opposed to binary).

    """
    impl = get_format_class(format_)
    impl.to_file(self, fp, format_, fps=fps, **kwargs)
python
{ "resource": "" }
q210
SSAFile.rename_style
train
def rename_style(self, old_name, new_name):
    """
    Rename a style, including references to it.

    Arguments:
        old_name (str): Style to be renamed.
        new_name (str): New name for the style (must be unused).

    Raises:
        KeyError: No style named old_name.
        ValueError: new_name is not a legal name (cannot use commas)
            or new_name is taken.

    """
    if old_name not in self.styles:
        raise KeyError("Style %r not found" % old_name)
    if new_name in self.styles:
        raise ValueError("There is already a style called %r" % new_name)
    if not is_valid_field_content(new_name):
        raise ValueError("%r is not a valid name" % new_name)

    self.styles[new_name] = self.styles[old_name]
    del self.styles[old_name]

    for line in self:
        # XXX also handle \r override tag
        if line.style == old_name:
            line.style = new_name
python
{ "resource": "" }
q211
SSAFile.import_styles
train
def import_styles(self, subs, overwrite=True):
    """
    Merge in styles from other SSAFile.

    Arguments:
        subs (SSAFile): Subtitle file imported from.
        overwrite (bool): On name conflict, use style from the other file
            (default: True).

    """
    if not isinstance(subs, SSAFile):
        raise TypeError("Must supply an SSAFile.")

    for name, style in subs.styles.items():
        if name not in self.styles or overwrite:
            self.styles[name] = style
python
{ "resource": "" }
q212
SSAFile.equals
train
def equals(self, other):
    """
    Equality of two SSAFiles.

    Compares :attr:`SSAFile.info`, :attr:`SSAFile.styles` and
    :attr:`SSAFile.events`. Order of entries in OrderedDicts does not
    matter. "ScriptType" key in info is considered an implementation
    detail and thus ignored.

    Useful mostly in unit tests. Differences are logged at DEBUG level.

    """
    if isinstance(other, SSAFile):
        for key in set(chain(self.info.keys(), other.info.keys())) - {"ScriptType"}:
            sv, ov = self.info.get(key), other.info.get(key)
            if sv is None:
                logging.debug("%r missing in self.info", key)
                return False
            elif ov is None:
                logging.debug("%r missing in other.info", key)
                return False
            elif sv != ov:
                logging.debug("info %r differs (self=%r, other=%r)", key, sv, ov)
                return False

        for key in set(chain(self.styles.keys(), other.styles.keys())):
            sv, ov = self.styles.get(key), other.styles.get(key)
            if sv is None:
                logging.debug("%r missing in self.styles", key)
                return False
            elif ov is None:
                logging.debug("%r missing in other.styles", key)
                return False
            elif sv != ov:
                for k in sv.FIELDS:
                    if getattr(sv, k) != getattr(ov, k):
                        logging.debug("difference in field %r", k)
                logging.debug("style %r differs (self=%r, other=%r)",
                              key, sv.as_dict(), ov.as_dict())
                return False

        if len(self) != len(other):
            logging.debug("different # of subtitles (self=%d, other=%d)",
                          len(self), len(other))
            return False

        for i, (se, oe) in enumerate(zip(self.events, other.events)):
            if not se.equals(oe):
                for k in se.FIELDS:
                    if getattr(se, k) != getattr(oe, k):
                        logging.debug("difference in field %r", k)
                logging.debug("event %d differs (self=%r, other=%r)",
                              i, se.as_dict(), oe.as_dict())
                return False

        return True
    else:
        raise TypeError("Cannot compare to non-SSAFile object")
python
{ "resource": "" }
q213
get_file_extension
train
def get_file_extension(format_):
    """Format identifier -> file extension"""
    if format_ not in FORMAT_IDENTIFIER_TO_FORMAT_CLASS:
        raise UnknownFormatIdentifierError(format_)

    for ext, f in FILE_EXTENSION_TO_FORMAT_IDENTIFIER.items():
        if f == format_:
            return ext

    raise RuntimeError("No file extension for format %r" % format_)
python
{ "resource": "" }
q214
autodetect_format
train
def autodetect_format(content):
    """Return format identifier for given fragment or raise FormatAutodetectionError."""
    formats = set()
    for impl in FORMAT_IDENTIFIER_TO_FORMAT_CLASS.values():
        guess = impl.guess_format(content)
        if guess is not None:
            formats.add(guess)

    if len(formats) == 1:
        return formats.pop()
    elif not formats:
        raise FormatAutodetectionError("No suitable formats")
    else:
        raise FormatAutodetectionError("Multiple suitable formats (%r)" % formats)
python
{ "resource": "" }
q215
src_reload
train
async def src_reload(app, path: str = None):
    """
    prompt each connected browser to reload by sending websocket message.

    :param path: if supplied this must be a path relative to
        app['static_path'], eg. reload of a single file is only supported
        for static resources.
    :return: number of sources reloaded
    """
    cli_count = len(app[WS])
    if cli_count == 0:
        return 0

    is_html = None
    if path:
        path = str(Path(app['static_url']) / Path(path).relative_to(app['static_path']))
        is_html = mimetypes.guess_type(path)[0] == 'text/html'

    reloads = 0
    aux_logger.debug('prompting source reload for %d clients', cli_count)
    for ws, url in app[WS]:
        if path and is_html and path not in {url, url + '.html', url.rstrip('/') + '/index.html'}:
            aux_logger.debug('skipping reload for client at %s', url)
            continue
        aux_logger.debug('reload client at %s', url)
        data = {
            'command': 'reload',
            'path': path or url,
            'liveCSS': True,
            'liveImg': True,
        }
        try:
            await ws.send_str(json.dumps(data))
        except RuntimeError as e:
            # eg. "RuntimeError: websocket connection is closing"
            aux_logger.error('Error broadcasting change to %s, RuntimeError: %s',
                             path or url, e)
        else:
            reloads += 1

    if reloads:
        s = '' if reloads == 1 else 's'
        aux_logger.info('prompted reload of %s on %d client%s', path or 'page', reloads, s)
    return reloads
python
{ "resource": "" }
q216
Settings.substitute_environ
train
def substitute_environ(self):
    """
    Substitute environment variables into settings.
    """
    for attr_name in dir(self):
        if attr_name.startswith('_') or attr_name.upper() != attr_name:
            continue

        orig_value = getattr(self, attr_name)
        is_required = isinstance(orig_value, Required)
        orig_type = orig_value.v_type if is_required else type(orig_value)
        env_var_name = self._ENV_PREFIX + attr_name
        env_var = os.getenv(env_var_name, None)
        if env_var is not None:
            if issubclass(orig_type, bool):
                env_var = env_var.upper() in ('1', 'TRUE')
            elif issubclass(orig_type, int):
                env_var = int(env_var)
            elif issubclass(orig_type, Path):
                env_var = Path(env_var)
            elif issubclass(orig_type, bytes):
                env_var = env_var.encode()
            # could do floats here and lists etc via json
            setattr(self, attr_name, env_var)
        elif is_required and attr_name not in self._custom_settings:
            raise RuntimeError('The required environment variable "{0}" is currently not set, '
                               'you\'ll need to run `source activate.settings.sh` '
                               'or you can set that single environment variable with '
                               '`export {0}="<value>"`'.format(env_var_name))
python
{ "resource": "" }
q217
serve
train
def serve(path, livereload, port, verbose):
    """
    Serve static files from a directory.
    """
    setup_logging(verbose)
    run_app(*serve_static(static_path=path, livereload=livereload, port=port))
python
{ "resource": "" }
q218
runserver
train
def runserver(**config):
    """
    Run a development server for an aiohttp app.

    Takes one argument "app-path" which should be a path to either a directory
    containing a recognized default file ("app.py" or "main.py") or to a
    specific file. Defaults to the environment variable "AIO_APP_PATH" or ".".

    The app path is run directly, see the "--app-factory" option for details
    on how an app is loaded from a python module.
    """
    active_config = {k: v for k, v in config.items() if v is not None}
    setup_logging(config['verbose'])
    try:
        run_app(*_runserver(**active_config))
    except AiohttpDevException as e:
        if config['verbose']:
            tb = click.style(traceback.format_exc().strip('\n'), fg='white', dim=True)
            main_logger.warning('AiohttpDevException traceback:\n%s', tb)
        main_logger.error('Error: %s', e)
        sys.exit(2)
python
{ "resource": "" }
q219
scenario
train
def scenario(weight=1, delay=0.0, name=None):
    """Decorator to register a function as a Molotov test.

    Options:

    - **weight** used by Molotov when the scenarios are randomly picked.
      The functions with the highest values are more likely to be picked.
      Integer, defaults to 1. This value is ignored when the
      *scenario_picker* decorator is used.
    - **delay** once the scenario is done, the worker will sleep *delay*
      seconds. Float, defaults to 0. The general --delay argument you can
      pass to Molotov will be summed with this delay.
    - **name** name of the scenario. If not provided, will use the
      function __name__ attribute.

    The decorated function receives an :class:`aiohttp.ClientSession`
    instance.
    """
    def _scenario(func, *args, **kw):
        _check_coroutine(func)
        if weight > 0:
            sname = name or func.__name__
            data = {'name': sname, 'weight': weight, 'delay': delay,
                    'func': func, 'args': args, 'kw': kw}
            _SCENARIO[sname] = data

        @functools.wraps(func)
        def __scenario(*args, **kw):
            return func(*args, **kw)
        return __scenario

    return _scenario
python
{ "resource": "" }
q220
request
train
def request(endpoint, verb='GET', session_options=None, **options):
    """Performs a synchronous request.

    Uses a dedicated event loop and aiohttp.ClientSession object.

    Options:

    - endpoint: the endpoint to call
    - verb: the HTTP verb to use (defaults: GET)
    - session_options: a dict containing options to initialize the session
      (defaults: None)
    - options: extra options for the request (defaults: None)

    Returns a dict object with the following keys:

    - content: the content of the response
    - status: the status
    - headers: a dict with all the response headers
    """
    req = functools.partial(_request, endpoint, verb, session_options, **options)
    return _run_in_fresh_loop(req)
python
{ "resource": "" }
q221
get_var
train
def get_var(name, factory=None):
    """Gets a global variable given its name.

    If factory is not None and the variable is not set, factory
    is a callable that will set the variable.

    If not set, returns None.
    """
    if name not in _VARS and factory is not None:
        _VARS[name] = factory()
    return _VARS.get(name)
python
{ "resource": "" }
q222
Worker.step
train
async def step(self, step_id, session, scenario=None):
    """Single scenario call.

    Returns 1 on success, -1 if the scenario failed,
    and 0 if the test is stopping or needs to stop.
    """
    if scenario is None:
        scenario = pick_scenario(self.wid, step_id)
    try:
        await self.send_event('scenario_start', scenario=scenario)
        await scenario['func'](session, *scenario['args'], **scenario['kw'])
        await self.send_event('scenario_success', scenario=scenario)

        if scenario['delay'] > 0.:
            await cancellable_sleep(scenario['delay'])
        return 1
    except Exception as exc:
        await self.send_event('scenario_failure', scenario=scenario, exception=exc)
        if self.args.verbose > 0:
            self.console.print_error(exc)
            await self.console.flush()
        return -1
python
{ "resource": "" }
q223
main
train
def main():
    """Moloslave clones a git repo and runs a molotov test."""
    parser = argparse.ArgumentParser(description='Github-based load test')

    parser.add_argument('--version', action='store_true', default=False,
                        help='Displays version and exits.')
    parser.add_argument('--virtualenv', type=str, default='virtualenv',
                        help='Virtualenv executable.')
    parser.add_argument('--python', type=str, default=sys.executable,
                        help='Python executable.')
    parser.add_argument('--config', type=str, default='molotov.json',
                        help='Path of the configuration file.')
    parser.add_argument('repo', help='Github repo', type=str, nargs="?")
    parser.add_argument('run', help='Test to run', nargs="?")
    args = parser.parse_args()

    if args.version:
        print(__version__)
        sys.exit(0)

    tempdir = tempfile.mkdtemp()
    curdir = os.getcwd()
    os.chdir(tempdir)
    print('Working directory is %s' % tempdir)

    try:
        clone_repo(args.repo)
        config_file = os.path.join(tempdir, args.config)

        with open(config_file) as f:
            config = json.loads(f.read())

        # creating the virtualenv
        create_virtualenv(args.virtualenv, args.python)

        # install deps
        if 'requirements' in config['molotov']:
            install_reqs(config['molotov']['requirements'])

        # load deps into sys.path
        pyver = '%d.%d' % (sys.version_info.major, sys.version_info.minor)
        site_pkg = os.path.join(tempdir, 'venv', 'lib',
                                'python' + pyver, 'site-packages')
        site.addsitedir(site_pkg)
        pkg_resources.working_set.add_entry(site_pkg)

        # environment
        if 'env' in config['molotov']:
            for key, value in config['molotov']['env'].items():
                os.environ[key] = value

        run_test(**config['molotov']['tests'][args.run])
    except Exception:
        os.chdir(curdir)
        shutil.rmtree(tempdir, ignore_errors=True)
        raise
python
{ "resource": "" }
q224
copy_files
train
def copy_files(source_files, target_directory, source_directory=None):
    """Copies a list of files to the specified directory.

    If source_directory is provided, it will be prepended to each source file.
    """
    try:
        os.makedirs(target_directory)
    except OSError:
        # The target directory may already exist.
        pass
    for f in source_files:
        source = os.path.join(source_directory, f) if source_directory else f
        target = os.path.join(target_directory, f)
        shutil.copy2(source, target)
python
{ "resource": "" }
q225
yes_or_no
train
def yes_or_no(message):
    """Gets user input and returns True for yes and False for no."""
    # Python 2 snippet: uses the print statement and raw_input().
    while True:
        print message, '(yes/no)',
        line = raw_input()
        if line is None:
            return None
        line = line.lower()
        if line == 'y' or line == 'ye' or line == 'yes':
            return True
        if line == 'n' or line == 'no':
            return False
python
{ "resource": "" }
q226
add_plugin
train
def add_plugin(plugin, directory=None):
    """Adds the specified plugin. This returns False if it was already added."""
    repo = require_repo(directory)
    plugins = get_value(repo, 'plugins', expect_type=dict)
    if plugin in plugins:
        return False
    plugins[plugin] = {}
    set_value(repo, 'plugins', plugins)
    return True
python
{ "resource": "" }
q227
get_plugin_settings
train
def get_plugin_settings(plugin, directory=None):
    """Gets the settings for the specified plugin."""
    repo = require_repo(directory)
    plugins = get_value(repo, 'plugins')
    return plugins.get(plugin) if isinstance(plugins, dict) else None
python
{ "resource": "" }
q228
preview
train
def preview(directory=None, host=None, port=None, watch=True):
    """Runs a local server to preview the working directory of a repository."""
    # Python 2 snippet: uses SimpleHTTPServer/SocketServer and the print statement.
    directory = directory or '.'
    host = host or '127.0.0.1'
    port = port or 5000

    # TODO: admin interface
    # TODO: use cache_only to keep from modifying output directly
    out_directory = build(directory)

    # Serve generated site
    os.chdir(out_directory)
    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    httpd = SocketServer.TCPServer((host, port), Handler)
    print ' * Serving on http://%s:%s/' % (host, port)
    httpd.serve_forever()
python
{ "resource": "" }
q229
require_repo
train
def require_repo(directory=None):
    """Checks for a presentation repository and raises an exception if not found."""
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + repr(directory))
    repo = repo_path(directory)
    if not os.path.isdir(repo):
        raise RepositoryNotFoundError(directory)
    return repo
python
{ "resource": "" }
q230
init
train
def init(directory=None):
    """Initializes a Gitpress presentation repository at the specified directory."""
    repo = repo_path(directory)
    if os.path.isdir(repo):
        raise RepositoryAlreadyExistsError(directory, repo)

    # Initialize repository with default template
    shutil.copytree(default_template_path, repo)

    message = '"Default presentation content."'
    subprocess.call(['git', 'init', '-q', repo])
    subprocess.call(['git', 'add', '.'], cwd=repo)
    subprocess.call(['git', 'commit', '-q', '-m', message], cwd=repo)

    return repo
python
{ "resource": "" }
q231
iterate_presentation_files
train
def iterate_presentation_files(path=None, excludes=None, includes=None):
    """Iterates the repository presentation files relative to 'path',
    not including themes. Note that 'includes' take priority."""
    # Defaults
    if includes is None:
        includes = []
    if excludes is None:
        excludes = []

    # Transform glob patterns to regular expressions
    includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
    excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
    includes_re = re.compile(includes_pattern)
    excludes_re = re.compile(excludes_pattern)

    def included(root, name):
        """Returns True if the specified file is a presentation file."""
        full_path = os.path.join(root, name)
        # Explicitly included files take priority
        if includes_re.match(full_path):
            return True
        # Ignore special and excluded files
        return (not specials_re.match(name) and
                not excludes_re.match(full_path))

    # Get a filtered list of paths to be built
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if included(root, d)]
        files = [f for f in files if included(root, f)]
        for f in files:
            yield os.path.relpath(os.path.join(root, f), path)
python
{ "resource": "" }
q232
read_config_file
train
def read_config_file(path):
    """Returns the configuration from the specified file."""
    try:
        with open(path, 'r') as f:
            return json.load(f, object_pairs_hook=OrderedDict)
    except IOError as ex:
        # Only a missing file is tolerated; the original compared the
        # exception object itself to errno.ENOENT, which never matches.
        if ex.errno != errno.ENOENT:
            raise
    return {}
python
{ "resource": "" }
q233
write_config
train
def write_config(repo_directory, config):
    """Writes the specified configuration to the presentation repository."""
    return write_config_file(os.path.join(repo_directory, config_file), config)
python
{ "resource": "" }
q234
write_config_file
train
def write_config_file(path, config):
    """Writes the specified configuration to the specified file."""
    contents = json.dumps(config, indent=4, separators=(',', ': ')) + '\n'
    try:
        with open(path, 'w') as f:
            f.write(contents)
        return True
    except IOError as ex:
        # Same errno fix as read_config_file: compare ex.errno, not ex.
        if ex.errno != errno.ENOENT:
            raise
    return False
python
{ "resource": "" }
q235
get_value
train
def get_value(repo_directory, key, expect_type=None):
    """Gets the value of the specified key in the config file."""
    config = read_config(repo_directory)
    value = config.get(key)
    if expect_type and value is not None and not isinstance(value, expect_type):
        raise ConfigSchemaError('Expected config variable %s to be type %s, got %s' %
                                (repr(key), repr(expect_type), repr(type(value))))
    return value
python
{ "resource": "" }
q236
set_value
train
def set_value(repo_directory, key, value, strict=True):
    """Sets the value of a particular key in the config file.

    This has no effect when setting to the same value.
    """
    if value is None:
        raise ValueError('Argument "value" must not be None.')

    # Read values and do nothing if not making any changes
    config = read_config(repo_directory)
    old = config.get(key)
    if old == value:
        return old

    # Check schema
    if strict and old is not None and not isinstance(old, type(value)):
        raise ConfigSchemaError('Expected config variable %s to be type %s, got %s' %
                                (repr(key), repr(type(value)), repr(type(old))))

    # Set new value and save results
    config[key] = value
    write_config(repo_directory, config)
    return old
python
{ "resource": "" }
q237
build
train
def build(content_directory=None, out_directory=None):
    """Builds the site from its content and presentation repository."""
    content_directory = content_directory or '.'
    out_directory = os.path.abspath(out_directory or default_out_directory)
    repo = require_repo(content_directory)

    # Prevent user mistakes
    if out_directory == '.':
        raise ValueError('Output directory must be different than the '
                         'source directory: ' + repr(out_directory))
    if os.path.basename(os.path.relpath(out_directory, content_directory)) == '..':
        raise ValueError('Output directory must not contain the '
                         'source directory: ' + repr(out_directory))

    # TODO: read config
    # TODO: use virtualenv
    # TODO: init and run plugins
    # TODO: process with active theme

    # Collect and copy static files
    files = presentation_files(repo)
    remove_directory(out_directory)
    copy_files(files, out_directory, repo)

    return out_directory
python
{ "resource": "" }
q238
gpp
train
def gpp(argv=None):
    """Shortcut function for running the previewing command."""
    if argv is None:
        argv = sys.argv[1:]
    argv.insert(0, 'preview')
    return main(argv)
python
{ "resource": "" }
q239
use_theme
train
def use_theme(theme, directory=None):
    """Switches to the specified theme.

    This returns False if switching to the already active theme.
    """
    repo = require_repo(directory)
    if theme not in list_themes(directory):
        raise ThemeNotFoundError(theme)
    old_theme = set_value(repo, 'theme', theme)
    return old_theme != theme
python
{ "resource": "" }
q240
data_type
train
def data_type(data, grouped=False, columns=None, key_on='idx', iter_idx=None):
    '''Data type check for automatic import'''
    if iter_idx:
        return Data.from_mult_iters(idx=iter_idx, **data)
    if pd:
        if isinstance(data, (pd.Series, pd.DataFrame)):
            return Data.from_pandas(data, grouped=grouped, columns=columns,
                                    key_on=key_on)
    if isinstance(data, (list, tuple, dict)):
        return Data.from_iter(data)
    else:
        raise ValueError('This data type is not supported by Vincent.')
python
{ "resource": "" }
q241
Map.rebind
train
def rebind(self, column=None, brew='GnBu'):
    """Bind a new column to the data map

    Parameters
    ----------
    column: str, default None
        Pandas DataFrame column name
    brew: str, default 'GnBu'
        Color brewer abbreviation. See colors.py
    """
    self.data['table'] = Data.keypairs(
        self.raw_data, columns=[self.data_key, column])
    domain = [Data.serialize(self.raw_data[column].min()),
              Data.serialize(self.raw_data[column].quantile(0.95))]
    scale = Scale(name='color', type='quantize', domain=domain,
                  range=brews[brew])
    self.scales['color'] = scale
python
{ "resource": "" }
q242
Visualization.axis_titles
train
def axis_titles(self, x=None, y=None):
    """Apply axis titles to the figure.

    This is a convenience method for manually modifying the "Axes" mark.

    Parameters
    ----------
    x: string, default 'null'
        X-axis title
    y: string, default 'null'
        Y-axis title

    Example
    -------
    >>> vis.axis_titles(y="Data 1", x="Data 2")
    """
    keys = self.axes.get_keys()
    if keys:
        for key in keys:
            if key == 'x':
                self.axes[key].title = x
            elif key == 'y':
                self.axes[key].title = y
    else:
        self.axes.extend([Axis(type='x', title=x),
                          Axis(type='y', title=y)])
    return self
python
{ "resource": "" }
q243
Visualization._set_axis_properties
train
def _set_axis_properties(self, axis):
    """Set AxisProperties and PropertySets"""
    if not getattr(axis, 'properties'):
        axis.properties = AxisProperties()
        for prop in ['ticks', 'axis', 'major_ticks', 'minor_ticks',
                     'title', 'labels']:
            setattr(axis.properties, prop, PropertySet())
python
{ "resource": "" }
q244
Visualization._set_all_axis_color
train
def _set_all_axis_color(self, axis, color):
    """Set axis ticks, title, labels to given color"""
    for prop in ['ticks', 'axis', 'major_ticks', 'minor_ticks',
                 'title', 'labels']:
        prop_set = getattr(axis.properties, prop)
        if color and prop in ['title', 'labels']:
            prop_set.fill = ValueRef(value=color)
        elif color and prop in ['axis', 'major_ticks', 'minor_ticks', 'ticks']:
            prop_set.stroke = ValueRef(value=color)
python
{ "resource": "" }
q245
Visualization._axis_properties
train
def _axis_properties(self, axis, title_size, title_offset, label_angle,
                     label_align, color):
    """Assign axis properties"""
    if self.axes:
        axis = [a for a in self.axes if a.scale == axis][0]
        self._set_axis_properties(axis)
        self._set_all_axis_color(axis, color)
        if title_size:
            axis.properties.title.font_size = ValueRef(value=title_size)
        if label_angle:
            axis.properties.labels.angle = ValueRef(value=label_angle)
        if label_align:
            axis.properties.labels.align = ValueRef(value=label_align)
        if title_offset:
            axis.properties.title.dy = ValueRef(value=title_offset)
    else:
        raise ValueError('This Visualization has no axes!')
python
{ "resource": "" }
q246
Visualization.common_axis_properties
train
def common_axis_properties(self, color=None, title_size=None):
    """Set common axis properties such as color

    Parameters
    ----------
    color: str, default None
        Hex color str, etc
    """
    if self.axes:
        for axis in self.axes:
            self._set_axis_properties(axis)
            self._set_all_axis_color(axis, color)
            if title_size:
                ref = ValueRef(value=title_size)
                axis.properties.title.font_size = ref
    else:
        raise ValueError('This Visualization has no axes!')
    return self
python
{ "resource": "" }
q247
Visualization.x_axis_properties
train
def x_axis_properties(self, title_size=None, title_offset=None,
                      label_angle=None, label_align=None, color=None):
    """Change x-axis title font size and label angle

    Parameters
    ----------
    title_size: int, default None
        Title size, in px
    title_offset: int, default None
        Pixel offset from given axis
    label_angle: int, default None
        label angle in degrees
    label_align: str, default None
        Label alignment
    color: str, default None
        Hex color
    """
    self._axis_properties('x', title_size, title_offset, label_angle,
                          label_align, color)
    return self
python
{ "resource": "" }
q248
Visualization.y_axis_properties
train
def y_axis_properties(self, title_size=None, title_offset=None,
                      label_angle=None, label_align=None, color=None):
    """Change y-axis title font size and label angle

    Parameters
    ----------
    title_size: int, default None
        Title size, in px
    title_offset: int, default None
        Pixel offset from given axis
    label_angle: int, default None
        label angle in degrees
    label_align: str, default None
        Label alignment
    color: str, default None
        Hex color
    """
    self._axis_properties('y', title_size, title_offset, label_angle,
                          label_align, color)
    return self
python
{ "resource": "" }
q249
Visualization.legend
train
def legend(self, title=None, scale='color', text_color=None):
    """Convenience method for adding a legend to the figure.

    Important: This defaults to the color scale that is generated with
    Line, Area, Stacked Line, etc charts. For bar charts, the scale ref
    is usually 'y'.

    Parameters
    ----------
    title: string, default None
        Legend Title
    scale: string, default 'color'
        Scale reference for legend
    text_color: str, default None
        Title and label color
    """
    self.legends.append(Legend(title=title, fill=scale, offset=0,
                               properties=LegendProperties()))
    if text_color:
        color_props = PropertySet(fill=ValueRef(value=text_color))
        self.legends[0].properties.labels = color_props
        self.legends[0].properties.title = color_props
    return self
python
{ "resource": "" }
q250
Visualization.colors
train
def colors(self, brew=None, range_=None):
    """Convenience method for adding color brewer scales to charts with
    a color scale, such as stacked or grouped bars.

    See the colors here: http://colorbrewer2.org/
    Or here: http://bl.ocks.org/mbostock/5577023

    This assumes that a 'color' scale exists on your chart.

    Parameters
    ----------
    brew: string, default None
        Color brewer scheme (BuGn, YlOrRd, etc)
    range_: list, default None
        List of colors. Ex: ['#ac4142', '#d28445', '#f4bf75']
    """
    if brew:
        self.scales['color'].range = brews[brew]
    elif range_:
        self.scales['color'].range = range_
    return self
python
{ "resource": "" }
q251
Visualization.validate
train
def validate(self, require_all=True, scale='colors'):
    """Validate the visualization contents.

    Parameters
    ----------
    require_all : boolean, default True
        If True (default), then all fields ``data``, ``scales``,
        ``axes``, and ``marks`` must be defined. The user is allowed to
        disable this if the intent is to define the elements
        client-side.

    If the contents of the visualization are not valid Vega, then a
    :class:`ValidationError` is raised.
    """
    super(self.__class__, self).validate()
    required_attribs = ('data', 'scales', 'axes', 'marks')
    for elem in required_attribs:
        attr = getattr(self, elem)
        if attr:
            # Validate each element of the sets of data, etc
            for entry in attr:
                entry.validate()
            names = [a.name for a in attr]
            if len(names) != len(set(names)):
                raise ValidationError(elem + ' has duplicate names')
        elif require_all:
            raise ValidationError(
                elem + ' must be defined for valid visualization')
python
{ "resource": "" }
q252
Visualization.display
train
def display(self):
    """Display the visualization inline in the IPython notebook.

    This is deprecated, use the following instead::

        from IPython.display import display
        display(viz)
    """
    from IPython.core.display import display, HTML
    display(HTML(self._repr_html_()))
python
{ "resource": "" }
q253
Data.validate
train
def validate(self, *args):
    """Validate contents of class"""
    super(self.__class__, self).validate(*args)
    if not self.name:
        raise ValidationError('name is required for Data')
python
{ "resource": "" }
q254
Data.serialize
train
def serialize(obj):
    """Convert an object into a JSON-serializable value

    This is used by the ``from_pandas`` and ``from_numpy`` functions to
    convert data to JSON-serializable types when loading.
    """
    if isinstance(obj, str_types):
        return obj
    elif hasattr(obj, 'timetuple'):
        return int(time.mktime(obj.timetuple())) * 1000
    elif hasattr(obj, 'item'):
        return obj.item()
    elif hasattr(obj, '__float__'):
        if isinstance(obj, int):
            return int(obj)
        else:
            return float(obj)
    elif hasattr(obj, '__int__'):
        return int(obj)
    else:
        raise LoadError('cannot serialize index of type ' + type(obj).__name__)
python
{ "resource": "" }
q255
Data.from_pandas
train
def from_pandas(cls, data, columns=None, key_on='idx', name=None,
                series_key='data', grouped=False, records=False, **kwargs):
    """Load values from a pandas ``Series`` or ``DataFrame`` object

    Parameters
    ----------
    data : pandas ``Series`` or ``DataFrame``
        Pandas object to import data from.
    columns: list, default None
        DataFrame columns to convert to Data. Keys default to col names.
        If columns are given and on_index is False, x-axis data will
        default to the first column.
    key_on: string, default 'idx'
        Value to key on for x-axis data. Defaults to index.
    name : string, default None
        Applies to the ``name`` attribute of the generated class. If
        ``None`` (default), then the ``name`` attribute of ``pd_obj`` is
        used if it exists, or ``'table'`` if it doesn't.
    series_key : string, default 'data'
        Applies only to ``Series``. If ``None`` (default), then defaults to
        data.name. For example, if ``series_key`` is ``'x'``, then the
        entries of the ``values`` list will be
        ``{'idx': ..., 'col': 'x', 'val': ...}``.
    grouped: boolean, default False
        Pass true for an extra grouping parameter
    records: boolean, default False
        Requires Pandas 0.12 or greater. Writes the Pandas DataFrame using
        the df.to_json(orient='records') formatting.
    **kwargs : dict
        Additional arguments passed to the :class:`Data` constructor.
    """
    # Note: There's an experimental JSON encoder floating around in
    # pandas land that hasn't made it into the main branch. This
    # function should be revisited if it ever does.
    if not pd:
        raise LoadError('pandas could not be imported')
    if not hasattr(data, 'index'):
        raise ValueError('Please load a Pandas object.')

    if name:
        vega_data = cls(name=name, **kwargs)
    else:
        vega_data = cls(name='table', **kwargs)

    pd_obj = data.copy()
    if columns:
        pd_obj = data[columns]
    if key_on != 'idx':
        pd_obj.index = data[key_on]
    if records:
        # The worst
        vega_data.values = json.loads(pd_obj.to_json(orient='records'))
        return vega_data

    vega_data.values = []

    if isinstance(pd_obj, pd.Series):
        data_key = data.name or series_key
        for i, v in pd_obj.iteritems():
            value = {}
            value['idx'] = cls.serialize(i)
            value['col'] = data_key
            value['val'] = cls.serialize(v)
            vega_data.values.append(value)

    elif isinstance(pd_obj, pd.DataFrame):
        # We have to explicitly convert the column names to strings
        # because the json serializer doesn't allow for integer keys.
        for i, row in pd_obj.iterrows():
            for num, (k, v) in enumerate(row.iteritems()):
                value = {}
                value['idx'] = cls.serialize(i)
                value['col'] = cls.serialize(k)
                value['val'] = cls.serialize(v)
                if grouped:
                    value['group'] = num
                vega_data.values.append(value)
    else:
        raise ValueError('cannot load from data type ' + type(pd_obj).__name__)

    return vega_data
python
{ "resource": "" }
q256
Data.from_numpy
train
def from_numpy(cls, np_obj, name, columns, index=None, index_key=None,
               **kwargs):
    """Load values from a numpy array

    Parameters
    ----------
    np_obj : numpy.ndarray
        numpy array to load data from
    name : string
        ``name`` field for the data
    columns : iterable
        Sequence of column names, from left to right. Must have same
        length as the number of columns of ``np_obj``.
    index : iterable, default None
        Sequence of indices from top to bottom. If ``None`` (default),
        then the indices are integers starting at 0. Must have same
        length as the number of rows of ``np_obj``.
    index_key : string, default None
        Key to use for the index. If ``None`` (default), ``idx`` is used.
    **kwargs : dict
        Additional arguments passed to the :class:`Data` constructor

    Notes
    -----
    The individual elements of ``np_obj``, ``columns``, and ``index``
    must return valid values from :func:`Data.serialize`.
    """
    if not np:
        raise LoadError('numpy could not be imported')

    _assert_is_type('numpy object', np_obj, np.ndarray)

    # Integer index if none is provided
    index = index or range(np_obj.shape[0])
    # Explicitly map dict-keys to strings for JSON serializer.
    columns = list(map(str, columns))

    index_key = index_key or cls._default_index_key

    if len(index) != np_obj.shape[0]:
        raise LoadError(
            'length of index must be equal to number of rows of array')
    elif len(columns) != np_obj.shape[1]:
        raise LoadError(
            'length of columns must be equal to number of columns of '
            'array')

    data = cls(name=name, **kwargs)
    data.values = [
        dict([(index_key, cls.serialize(idx))] +
             [(col, x) for col, x in zip(columns, row)])
        for idx, row in zip(index, np_obj.tolist())]

    return data
python
{ "resource": "" }
q257
Data.from_mult_iters
train
def from_mult_iters(cls, name=None, idx=None, **kwargs):
    """Load values from multiple iters

    Parameters
    ----------
    name : string, default None
        Name of the data set. If None (default), the name will be set to
        ``'table'``.
    idx: string, default None
        Iterable to use for the data index
    **kwargs : dict of iterables
        The ``values`` field will contain dictionaries with keys for
        each of the iterables provided. For example,

            d = Data.from_mult_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))

        would result in ``d`` having a ``values`` field with

            [{'idx': 0, 'col': 'y', 'val': 10},
             {'idx': 1, 'col': 'y', 'val': 20}]

        If the iterables are not the same length, then ValueError is
        raised.
    """
    if not name:
        name = 'table'

    lengths = [len(v) for v in kwargs.values()]

    if len(set(lengths)) != 1:
        raise ValueError('Iterables must all be same length')

    if not idx:
        raise ValueError('Must provide iter name index reference')

    index = kwargs.pop(idx)
    vega_vals = []
    for k, v in sorted(kwargs.items()):
        for idx, val in zip(index, v):
            value = {}
            value['idx'] = idx
            value['col'] = k
            value['val'] = val
            vega_vals.append(value)

    return cls(name, values=vega_vals)
python
{ "resource": "" }
q258
Data.from_iter
train
def from_iter(cls, data, name=None):
    """Convenience method for loading data from an iterable.

    Defaults to numerical indexing for x-axis.

    Parameters
    ----------
    data: iterable
        An iterable of data (list, tuple, dict of key/val pairs)
    name: string, default None
        Name of the data set. If None (default), the name will be set to
        ``'table'``.
    """
    if not name:
        name = 'table'
    if isinstance(data, (list, tuple)):
        data = {x: y for x, y in enumerate(data)}

    values = [{'idx': k, 'col': 'data', 'val': v}
              for k, v in sorted(data.items())]
    return cls(name, values=values)
python
{ "resource": "" }
q259
Data._numpy_to_values
train
def _numpy_to_values(data):
    '''Convert a NumPy array to values attribute'''
    def to_list_no_index(xvals, yvals):
        return [{"x": x, "y": np.asscalar(y)}
                for x, y in zip(xvals, yvals)]

    if len(data.shape) == 1 or data.shape[1] == 1:
        xvals = range(data.shape[0] + 1)
        values = to_list_no_index(xvals, data)
    elif len(data.shape) == 2:
        if data.shape[1] == 2:
            # NumPy arrays and matrices have different iteration rules.
            if isinstance(data, np.matrix):
                xidx = (0, 0)
                yidx = (0, 1)
            else:
                xidx = 0
                yidx = 1

            xvals = [np.asscalar(row[xidx]) for row in data]
            yvals = [np.asscalar(row[yidx]) for row in data]
            values = [{"x": x, "y": y} for x, y in zip(xvals, yvals)]
        else:
            raise ValueError('arrays with > 2 columns not supported')
    else:
        raise ValueError('invalid dimensions for ndarray')

    return values
python
{ "resource": "" }
q260
Data.to_json
train
def to_json(self, validate=False, pretty_print=True, data_path=None):
    """Convert data to JSON

    Parameters
    ----------
    data_path : string
        If not None, then data is written to a separate file at the
        specified path. Note that the ``url`` attribute of the data must
        be set independently for the data to load correctly.

    Returns
    -------
    string
        Valid Vega JSON.
    """
    # TODO: support writing to separate file
    return super(self.__class__, self).to_json(validate=validate,
                                               pretty_print=pretty_print)
python
{ "resource": "" }
q261
_assert_is_type
train
def _assert_is_type(name, value, value_type):
    """Assert that a value must be a given type."""
    if not isinstance(value, value_type):
        if type(value_type) is tuple:
            types = ', '.join(t.__name__ for t in value_type)
            raise ValueError('{0} must be one of ({1})'.format(name, types))
        else:
            raise ValueError('{0} must be {1}'.format(name, value_type.__name__))
python
{ "resource": "" }
q262
grammar
train
def grammar(grammar_type=None, grammar_name=None):
    """Decorator to define properties that map to the ``grammar`` dict.
    This dict is the canonical representation of the Vega grammar within
    Vincent.

    This decorator is intended for classes that map to some pre-defined JSON
    structure, such as axes, data, marks, scales, etc. It is assumed that
    this decorates functions with an instance of ``self.grammar``.

    Parameters
    ----------
    grammar_type : type or tuple of types, default None
        If the argument to the decorated function is not of the given types,
        then a ValueError is raised. No type checking is done if the type is
        None (default).
    grammar_name : string, default None
        An optional name to map to the internal ``grammar`` dict. If None
        (default), then the key for the dict is the name of the function
        being decorated. If not None, then it will be the name specified
        here. This is useful if the expected JSON field name is a Python
        keyword or has an un-Pythonic name.

    This should decorate a "validator" function that should return no value
    but raise an exception if the provided value is not valid Vega grammar.
    If the validator throws no exception, then the value is assigned to the
    ``grammar`` dict.

    The validator function should take only one argument - the value to be
    validated - so that no ``self`` argument is included; the validator
    should not modify the class.

    If no arguments are given, then no type-checking is done and the property
    will be mapped to a field with the name of the decorated function.

    The doc string for the property is taken from the validator function's
    doc string.
    """
    def grammar_creator(validator, name):
        def setter(self, value):
            if isinstance(grammar_type, (type, tuple)):
                _assert_is_type(validator.__name__, value, grammar_type)
            validator(value)
            self.grammar[name] = value

        def getter(self):
            return self.grammar.get(name, None)

        def deleter(self):
            if name in self.grammar:
                del self.grammar[name]

        return property(getter, setter, deleter, validator.__doc__)

    if isinstance(grammar_type, (type, tuple)):
        # If grammar_type is a type, return another decorator.
        def grammar_dec(validator):
            # Make sure to use the grammar name if it's there.
            if grammar_name:
                return grammar_creator(validator, grammar_name)
            else:
                return grammar_creator(validator, validator.__name__)
        return grammar_dec
    elif isinstance(grammar_name, str_types):
        # If grammar_name is a string, use that name and return another
        # decorator.
        def grammar_dec(validator):
            return grammar_creator(validator, grammar_name)
        return grammar_dec
    else:
        # Otherwise we assume that grammar_type is actually the function
        # being decorated.
        return grammar_creator(grammar_type, grammar_type.__name__)
python
{ "resource": "" }
q263
GrammarClass.validate
train
def validate(self):
    """Validate the contents of the object.

    This calls ``setattr`` for each of the class's grammar properties. It
    will catch ``ValueError``s raised by the grammar property's setters
    and re-raise them as :class:`ValidationError`.
    """
    for key, val in self.grammar.items():
        try:
            setattr(self, key, val)
        except ValueError as e:
            raise ValidationError('invalid contents: ' + e.args[0])
python
{ "resource": "" }
q264
GrammarClass.to_json
train
def to_json(self, path=None, html_out=False,
            html_path='vega_template.html', validate=False,
            pretty_print=True):
    """Convert object to JSON

    Parameters
    ----------
    path: string, default None
        Path to write JSON out. If there is no path provided, JSON will
        be returned as a string to the console.
    html_out: boolean, default False
        If True, vincent will output a simple HTML scaffold to visualize
        the vega json output.
    html_path: string, default 'vega_template.html'
        Path for the html file (if html_out=True)
    validate : boolean
        If True, call the object's `validate` method before
        serializing. Default is False.
    pretty_print : boolean
        If True (default), JSON is printed in more-readable form with
        indentation and spaces.

    Returns
    -------
    string
        JSON serialization of the class's grammar properties.
    """
    if validate:
        self.validate()

    if pretty_print:
        dumps_args = {'indent': 2, 'separators': (',', ': ')}
    else:
        dumps_args = {}

    def encoder(obj):
        if hasattr(obj, 'grammar'):
            return obj.grammar

    if html_out:
        template = Template(
            str(resource_string('vincent', 'vega_template.html')))
        with open(html_path, 'w') as f:
            f.write(template.substitute(path=path))

    if path:
        with open(path, 'w') as f:
            json.dump(self.grammar, f, default=encoder, sort_keys=True,
                      **dumps_args)
    else:
        return json.dumps(self.grammar, default=encoder, sort_keys=True,
                          **dumps_args)
python
{ "resource": "" }
q265
useful_mimetype
train
def useful_mimetype(text):
    """Check to see if the given mime type is a MIME type
    which is useful in terms of how to treat this file.
    """
    if text is None:
        return False
    mimetype = normalize_mimetype(text)
    return mimetype not in [DEFAULT, PLAIN, None]
python
{ "resource": "" }
q266
normalize_extension
train
def normalize_extension(extension):
    """Normalise a file name extension."""
    extension = decode_path(extension)
    if extension is None:
        return
    if extension.startswith('.'):
        extension = extension[1:]
    if '.' in extension:
        _, extension = os.path.splitext(extension)
    extension = slugify(extension, sep='')
    if extension is None:
        return
    if len(extension):
        return extension
python
{ "resource": "" }
q267
fetch
train
def fetch(url: str, **kwargs) -> Selector:
    """
    Send HTTP request and parse it as a DOM tree.

    Args:
        url (str): The url of the site.

    Returns:
        Selector: allows you to select parts of HTML text using
        CSS or XPath expressions.
    """
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    try:
        res = requests.get(url, **kwargs)
        res.raise_for_status()
    except requests.RequestException as e:
        print(e)
    else:
        html = res.text
        tree = Selector(text=html)
        return tree
python
{ "resource": "" }
q268
async_fetch
train
async def async_fetch(url: str, **kwargs) -> Selector:
    """
    Do the fetch in an async style.

    Args:
        url (str): The url of the site.

    Returns:
        Selector: allows you to select parts of HTML text using
        CSS or XPath expressions.
    """
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    async with aiohttp.ClientSession(**kwargs) as ses:
        async with ses.get(url, **kwargs) as res:
            html = await res.text()
            tree = Selector(text=html)
            return tree
python
{ "resource": "" }
q269
links
train
def links(res: requests.models.Response, search: str = None,
          pattern: str = None) -> list:
    """Get the links of the page.

    Args:
        res (requests.models.Response): The response of the page.
        search (str, optional): Defaults to None. Search the links you want.
        pattern (str, optional): Defaults to None. Search the links use a
            regex pattern.

    Returns:
        list: All the links of the page.
    """
    hrefs = [link.to_text() for link in find_all_links(res.text)]
    if search:
        hrefs = [href for href in hrefs if search in href]
    if pattern:
        hrefs = [href for href in hrefs if re.findall(pattern, href)]
    return list(set(hrefs))
python
{ "resource": "" }
q270
save_as_json
train
def save_as_json(total: list, name='data.json', sort_by: str = None,
                 no_duplicate=False, order='asc'):
    """Save what you crawled as a json file.

    Args:
        total (list): Total of data you crawled.
        name (str, optional): Defaults to 'data.json'. The name of the file.
        sort_by (str, optional): Defaults to None. Sort items by a
            specific key.
        no_duplicate (bool, optional): Defaults to False. If True, it will
            remove duplicated data.
        order (str, optional): Defaults to 'asc'. The opposite option
            is 'desc'.
    """
    if sort_by:
        reverse = order == 'desc'
        total = sorted(total, key=itemgetter(sort_by), reverse=reverse)
    if no_duplicate:
        total = [key for key, _ in groupby(total)]
    data = json.dumps(total, ensure_ascii=False)
    Path(name).write_text(data, encoding='utf-8')
python
{ "resource": "" }
q271
IlluminantMixin.set_observer
train
def set_observer(self, observer):
    """
    Validates and sets the color's observer angle.

    .. note:: This only changes the observer angle value. It does no
        conversion of the color's coordinates.

    :param str observer: One of '2' or '10'.
    """
    observer = str(observer)
    if observer not in color_constants.OBSERVERS:
        raise InvalidObserverError(self)
    self.observer = observer
python
{ "resource": "" }
q272
IlluminantMixin.set_illuminant
train
def set_illuminant(self, illuminant):
    """
    Validates and sets the color's illuminant.

    .. note:: This only changes the illuminant. It does no conversion of the
        color's coordinates. For this, you'll want to refer to
        :py:meth:`XYZColor.apply_adaptation
        <colormath.color_objects.XYZColor.apply_adaptation>`.

    .. tip:: Call this after setting your observer.

    :param str illuminant: One of the various illuminants.
    """
    illuminant = illuminant.lower()
    if illuminant not in color_constants.ILLUMINANTS[self.observer]:
        raise InvalidIlluminantError(illuminant)
    self.illuminant = illuminant
python
{ "resource": "" }
q273
SpectralColor.get_numpy_array
train
def get_numpy_array(self):
    """
    Dump this color into a NumPy array.
    """
    # This holds the object's spectral data, and will be passed to
    # numpy.array() to create a numpy array (matrix) for the matrix math
    # that will be done during the conversion to XYZ.
    values = []

    # Use the required value list to build this dynamically. Default to
    # 0.0, since that ultimately won't affect the outcome due to the math
    # involved.
    for val in self.VALUES:
        values.append(getattr(self, val, 0.0))

    # Create the actual numpy array/matrix from the spectral list.
    color_array = numpy.array([values])
    return color_array
python
{ "resource": "" }
q274
XYZColor.apply_adaptation
train
def apply_adaptation(self, target_illuminant, adaptation='bradford'):
    """
    This applies an adaptation matrix to change the XYZ color's illuminant.
    You'll most likely only need this during RGB conversions.
    """
    logger.debug("  \\- Original illuminant: %s", self.illuminant)
    logger.debug("  \\- Target illuminant: %s", target_illuminant)

    # If the XYZ values were taken with a different reference white than the
    # native reference white of the target RGB space, a transformation matrix
    # must be applied.
    if self.illuminant != target_illuminant:
        logger.debug("  \\* Applying transformation from %s to %s ",
                     self.illuminant, target_illuminant)
        # Sets the adjusted XYZ values, and the new illuminant.
        apply_chromatic_adaptation_on_color(
            color=self,
            targ_illum=target_illuminant,
            adaptation=adaptation)
python
{ "resource": "" }
q275
BaseRGBColor._clamp_rgb_coordinate
train
def _clamp_rgb_coordinate(self, coord):
    """
    Clamps an RGB coordinate, taking into account whether or not the
    color is upscaled.

    :param float coord: The coordinate value.
    :rtype: float
    :returns: The clamped value.
    """
    if not self.is_upscaled:
        return min(max(coord, 0.0), 1.0)
    else:
        return min(max(coord, 1), 255)
python
{ "resource": "" }
q276
BaseRGBColor.get_upscaled_value_tuple
train
def get_upscaled_value_tuple(self):
    """
    Scales an RGB color object from decimal 0.0-1.0 to int 0-255.
    """
    # Scale up to 0-255 values.
    rgb_r = int(math.floor(0.5 + self.rgb_r * 255))
    rgb_g = int(math.floor(0.5 + self.rgb_g * 255))
    rgb_b = int(math.floor(0.5 + self.rgb_b * 255))
    return rgb_r, rgb_g, rgb_b
python
{ "resource": "" }
q277
auto_density
train
def auto_density(color):
    """
    Given a SpectralColor, automatically choose the correct ANSI T filter
    and return the calculated density.

    :param SpectralColor color: The SpectralColor object to calculate
        density for.
    :rtype: float
    :returns: The density value, with the filter selected automatically.
    """
    blue_density = ansi_density(color, ANSI_STATUS_T_BLUE)
    green_density = ansi_density(color, ANSI_STATUS_T_GREEN)
    red_density = ansi_density(color, ANSI_STATUS_T_RED)

    densities = [blue_density, green_density, red_density]
    min_density = min(densities)
    max_density = max(densities)
    density_range = max_density - min_density

    # See comments in density_standards.py for VISUAL_DENSITY_THRESH to
    # understand what this is doing.
    if density_range <= VISUAL_DENSITY_THRESH:
        return ansi_density(color, ISO_VISUAL)
    elif blue_density > green_density and blue_density > red_density:
        return blue_density
    elif green_density > blue_density and green_density > red_density:
        return green_density
    else:
        return red_density
python
{ "resource": "" }
q278
_get_lab_color1_vector
train
def _get_lab_color1_vector(color):
    """
    Converts an LabColor into a NumPy vector.

    :param LabColor color:
    :rtype: numpy.ndarray
    """
    if not color.__class__.__name__ == 'LabColor':
        raise ValueError(
            "Delta E functions can only be used with two LabColor objects.")
    return numpy.array([color.lab_l, color.lab_a, color.lab_b])
python
{ "resource": "" }
q279
_get_adaptation_matrix
train
def _get_adaptation_matrix(wp_src, wp_dst, observer, adaptation):
    """
    Calculate the correct transformation matrix based on origin and target
    illuminants. The observer angle must be the same between illuminants.

    See colormath.color_constants.ADAPTATION_MATRICES for a list of possible
    adaptations.

    Detailed conversion documentation is available at:
    http://brucelindbloom.com/Eqn_ChromAdapt.html
    """
    # Get the appropriate transformation matrix, [MsubA].
    m_sharp = color_constants.ADAPTATION_MATRICES[adaptation]

    # In case the white-points are still input as strings
    # Get white-points for illuminant
    if isinstance(wp_src, str):
        orig_illum = wp_src.lower()
        wp_src = color_constants.ILLUMINANTS[observer][orig_illum]
    elif hasattr(wp_src, '__iter__'):
        wp_src = wp_src

    if isinstance(wp_dst, str):
        targ_illum = wp_dst.lower()
        wp_dst = color_constants.ILLUMINANTS[observer][targ_illum]
    elif hasattr(wp_dst, '__iter__'):
        wp_dst = wp_dst

    # Sharpened cone responses ~ rho gamma beta ~ sharpened r g b
    rgb_src = numpy.dot(m_sharp, wp_src)
    rgb_dst = numpy.dot(m_sharp, wp_dst)

    # Ratio of whitepoint sharpened responses
    m_rat = numpy.diag(rgb_dst / rgb_src)

    # Final transformation matrix
    m_xfm = numpy.dot(numpy.dot(pinv(m_sharp), m_rat), m_sharp)

    return m_xfm
python
{ "resource": "" }
q280
apply_chromatic_adaptation
train
def apply_chromatic_adaptation(val_x, val_y, val_z, orig_illum, targ_illum,
                               observer='2', adaptation='bradford'):
    """
    Applies a chromatic adaptation matrix to convert XYZ values between
    illuminants. It is important to recognize that color transformation
    results in color errors, determined by how far the original illuminant
    is from the target illuminant. For example, D65 to A could result in
    very high maximum deviance.

    An informative article with estimated average Delta E values for each
    illuminant conversion may be found at:

    http://brucelindbloom.com/ChromAdaptEval.html
    """
    # It's silly to have to do this, but some people may want to call this
    # function directly, so we'll protect them from messing up upper/lower
    # case.
    adaptation = adaptation.lower()

    # Get white-points for illuminant
    if isinstance(orig_illum, str):
        orig_illum = orig_illum.lower()
        wp_src = color_constants.ILLUMINANTS[observer][orig_illum]
    elif hasattr(orig_illum, '__iter__'):
        wp_src = orig_illum

    if isinstance(targ_illum, str):
        targ_illum = targ_illum.lower()
        wp_dst = color_constants.ILLUMINANTS[observer][targ_illum]
    elif hasattr(targ_illum, '__iter__'):
        wp_dst = targ_illum

    logger.debug("  \\* Applying adaptation matrix: %s", adaptation)
    # Retrieve the appropriate transformation matrix from the constants.
    transform_matrix = _get_adaptation_matrix(wp_src, wp_dst,
                                              observer, adaptation)

    # Stuff the XYZ values into a NumPy matrix for conversion.
    XYZ_matrix = numpy.array((val_x, val_y, val_z))
    # Perform the adaptation via matrix multiplication.
    result_matrix = numpy.dot(transform_matrix, XYZ_matrix)

    # Return individual X, Y, and Z coordinates.
    return result_matrix[0], result_matrix[1], result_matrix[2]
python
{ "resource": "" }
q281
apply_chromatic_adaptation_on_color
train
def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford'):
    """
    Convenience function to apply an adaptation directly to a Color object.
    """
    xyz_x = color.xyz_x
    xyz_y = color.xyz_y
    xyz_z = color.xyz_z
    orig_illum = color.illuminant
    targ_illum = targ_illum.lower()
    observer = color.observer
    adaptation = adaptation.lower()

    # Adapt the XYZ coordinates in place, then update the illuminant.
    color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation(
        xyz_x, xyz_y, xyz_z, orig_illum, targ_illum,
        observer=observer, adaptation=adaptation)
    color.set_illuminant(targ_illum)

    return color
python
{ "resource": "" }
q282
example_lab_to_xyz
train
def example_lab_to_xyz():
    """
    This function shows a simple conversion of an Lab color to an XYZ color.
    """
    print("=== Simple Example: Lab->XYZ ===")
    # Instantiate an Lab color object with the given values.
    lab = LabColor(0.903, 16.296, -2.22)
    # Show a string representation.
    print(lab)
    # Convert to XYZ.
    xyz = convert_color(lab, XYZColor)
    print(xyz)
    print("=== End Example ===\n")
python
{ "resource": "" }
q283
example_lchab_to_lchuv
train
def example_lchab_to_lchuv():
    """
    This function shows a very complex chain of conversions in action.

    LCHab to LCHuv involves four different calculations, making this the
    conversion requiring the most steps.
    """
    print("=== Complex Example: LCHab->LCHuv ===")
    # Instantiate an LCHab color object with the given values.
    lchab = LCHabColor(0.903, 16.447, 352.252)
    # Show a string representation.
    print(lchab)
    # Convert to LCHuv.
    lchuv = convert_color(lchab, LCHuvColor)
    print(lchuv)
    print("=== End Example ===\n")
python
{ "resource": "" }
q284
example_lab_to_rgb
train
def example_lab_to_rgb():
    """
    Conversions to RGB are a little more complex mathematically. There are also
    several kinds of RGB color spaces. When converting from a device-independent
    color space to RGB, sRGB is assumed unless otherwise specified with the
    target_rgb keyword arg.
    """
    print("=== RGB Example: Lab->RGB ===")
    # Instantiate an Lab color object with the given values.
    lab = LabColor(0.903, 16.296, -2.217)
    # Show a string representation.
    print(lab)
    # Convert to sRGB.
    rgb = convert_color(lab, sRGBColor)
    print(rgb)
    print("=== End Example ===\n")
python
{ "resource": "" }
q285
example_rgb_to_xyz
train
def example_rgb_to_xyz():
    """
    The reverse is similar.
    """
    print("=== RGB Example: RGB->XYZ ===")
    # Instantiate an sRGB color object from 0-255 values. The is_upscaled
    # flag tells colormath to divide each channel by 255, since RGB values
    # are stored internally in the 0-1 range.
    rgb = sRGBColor(120, 130, 140, is_upscaled=True)
    # Show a string representation.
    print(rgb)
    # Convert RGB to XYZ using a D50 illuminant.
    xyz = convert_color(rgb, XYZColor, target_illuminant='D50')
    print(xyz)
    print("=== End Example ===\n")
python
{ "resource": "" }
q286
example_spectral_to_xyz
train
def example_spectral_to_xyz():
    """
    Instantiate a SpectralColor object and convert it to XYZ. Note that the
    spectral range can run from 340nm to 830nm. Any omitted values assume a
    value of 0.0, which is more or less ignored. For the distribution below,
    we are providing an example reading from an X-Rite i1 Pro, which only
    measures between 380nm and 730nm.
    """
    print("=== Example: Spectral->XYZ ===")
    spc = SpectralColor(
        observer='2', illuminant='d50',
        spec_380nm=0.0600, spec_390nm=0.0600, spec_400nm=0.0641,
        spec_410nm=0.0654, spec_420nm=0.0645, spec_430nm=0.0605,
        spec_440nm=0.0562, spec_450nm=0.0543, spec_460nm=0.0537,
        spec_470nm=0.0541, spec_480nm=0.0559, spec_490nm=0.0603,
        spec_500nm=0.0651, spec_510nm=0.0680, spec_520nm=0.0705,
        spec_530nm=0.0736, spec_540nm=0.0772, spec_550nm=0.0809,
        spec_560nm=0.0870, spec_570nm=0.0990, spec_580nm=0.1128,
        spec_590nm=0.1251, spec_600nm=0.1360, spec_610nm=0.1439,
        spec_620nm=0.1511, spec_630nm=0.1590, spec_640nm=0.1688,
        spec_650nm=0.1828, spec_660nm=0.1996, spec_670nm=0.2187,
        spec_680nm=0.2397, spec_690nm=0.2618, spec_700nm=0.2852,
        spec_710nm=0.2500, spec_720nm=0.2400, spec_730nm=0.2300)
    xyz = convert_color(spc, XYZColor)
    print(xyz)
    print("=== End Example ===\n")
python
{ "resource": "" }
q287
example_lab_to_ipt
train
def example_lab_to_ipt():
    """
    This function shows a simple conversion of an XYZ color to an IPT color.
    """
    print("=== Simple Example: XYZ->IPT ===")
    # Instantiate an XYZ color object with the given values.
    xyz = XYZColor(0.5, 0.5, 0.5, illuminant='d65')
    # Show a string representation.
    print(xyz)
    # Convert to IPT.
    ipt = convert_color(xyz, IPTColor)
    print(ipt)
    print("=== End Example ===\n")
python
{ "resource": "" }
q288
apply_RGB_matrix
train
def apply_RGB_matrix(var1, var2, var3, rgb_type, convtype="xyz_to_rgb"):
    """
    Applies an RGB working matrix to convert between XYZ and RGB.
    The arguments are tersely named var1, var2, and var3 to allow for the
    passing of XYZ _or_ RGB values. var1 is X for XYZ, and R for RGB. var2
    and var3 follow suit.
    """
    convtype = convtype.lower()
    # Retrieve the appropriate transformation matrix from the constants.
    rgb_matrix = rgb_type.conversion_matrices[convtype]

    logger.debug("  * Applying RGB conversion matrix: %s->%s",
                 rgb_type.__class__.__name__, convtype)
    # Stuff the RGB/XYZ values into a NumPy matrix for conversion.
    var_matrix = numpy.array((var1, var2, var3))
    # Perform the adaptation via matrix multiplication.
    result_matrix = numpy.dot(rgb_matrix, var_matrix)
    rgb_r, rgb_g, rgb_b = result_matrix

    # Clamp these values to a valid range.
    rgb_r = max(rgb_r, 0.0)
    rgb_g = max(rgb_g, 0.0)
    rgb_b = max(rgb_b, 0.0)
    return rgb_r, rgb_g, rgb_b
python
{ "resource": "" }
q289
color_conversion_function
train
def color_conversion_function(start_type, target_type):
    """
    Decorator to indicate a function that performs a conversion from one
    color space to another.

    This decorator will return the original function unmodified, however
    it will be registered in the _conversion_manager so it can be used to
    perform color space transformations between color spaces that do not
    have direct conversion functions (e.g., Luv to CMYK).

    Note: For a conversion to/from RGB supply the BaseRGBColor class.

    :param start_type: Starting color space type
    :param target_type: Target color space type
    """
    def decorator(f):
        f.start_type = start_type
        f.target_type = target_type
        _conversion_manager.add_type_conversion(start_type, target_type, f)
        return f
    return decorator
python
{ "resource": "" }
q290
Spectral_to_XYZ
train
def Spectral_to_XYZ(cobj, illuminant_override=None, *args, **kwargs):
    """
    Converts spectral readings to XYZ.
    """
    # If the user provides an illuminant_override numpy array, use it.
    # Note: test identity against None here; truth-testing a NumPy array
    # raises an "ambiguous truth value" error.
    if illuminant_override is not None:
        reference_illum = illuminant_override
    else:
        # Otherwise, look up the illuminant from known standards based
        # on the value of 'illuminant' pulled from the SpectralColor object.
        try:
            reference_illum = spectral_constants.REF_ILLUM_TABLE[cobj.illuminant]
        except KeyError:
            raise InvalidIlluminantError(cobj.illuminant)

    # Get the spectral distribution of the selected standard observer.
    if cobj.observer == '10':
        std_obs_x = spectral_constants.STDOBSERV_X10
        std_obs_y = spectral_constants.STDOBSERV_Y10
        std_obs_z = spectral_constants.STDOBSERV_Z10
    else:
        # Assume 2 degree, since it is theoretically the only other possibility.
        std_obs_x = spectral_constants.STDOBSERV_X2
        std_obs_y = spectral_constants.STDOBSERV_Y2
        std_obs_z = spectral_constants.STDOBSERV_Z2

    # This is a NumPy array containing the spectral distribution of the color.
    sample = cobj.get_numpy_array()

    # The denominator is constant throughout the entire calculation for X,
    # Y, and Z coordinates. Calculate it once and re-use.
    denom = std_obs_y * reference_illum

    # This is also a common element in the calculation whereby the sample
    # NumPy array is multiplied by the reference illuminant's power distribution
    # (which is also a NumPy array).
    sample_by_ref_illum = sample * reference_illum

    # Calculate the numerators of the equations to find X, Y, and Z.
    x_numerator = sample_by_ref_illum * std_obs_x
    y_numerator = sample_by_ref_illum * std_obs_y
    z_numerator = sample_by_ref_illum * std_obs_z

    xyz_x = x_numerator.sum() / denom.sum()
    xyz_y = y_numerator.sum() / denom.sum()
    xyz_z = z_numerator.sum() / denom.sum()

    return XYZColor(
        xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)
python
{ "resource": "" }
q291
Lab_to_XYZ
train
def Lab_to_XYZ(cobj, *args, **kwargs):
    """
    Convert from Lab to XYZ
    """
    illum = cobj.get_illuminant_xyz()
    xyz_y = (cobj.lab_l + 16.0) / 116.0
    xyz_x = cobj.lab_a / 500.0 + xyz_y
    xyz_z = xyz_y - cobj.lab_b / 200.0

    if math.pow(xyz_y, 3) > color_constants.CIE_E:
        xyz_y = math.pow(xyz_y, 3)
    else:
        xyz_y = (xyz_y - 16.0 / 116.0) / 7.787

    if math.pow(xyz_x, 3) > color_constants.CIE_E:
        xyz_x = math.pow(xyz_x, 3)
    else:
        xyz_x = (xyz_x - 16.0 / 116.0) / 7.787

    if math.pow(xyz_z, 3) > color_constants.CIE_E:
        xyz_z = math.pow(xyz_z, 3)
    else:
        xyz_z = (xyz_z - 16.0 / 116.0) / 7.787

    xyz_x = (illum["X"] * xyz_x)
    xyz_y = (illum["Y"] * xyz_y)
    xyz_z = (illum["Z"] * xyz_z)

    return XYZColor(
        xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)
python
{ "resource": "" }
q292
Luv_to_XYZ
train
def Luv_to_XYZ(cobj, *args, **kwargs):
    """
    Convert from Luv to XYZ.
    """
    illum = cobj.get_illuminant_xyz()
    # Without Light, there is no color. Short-circuit this and avoid some
    # zero division errors in the var_a_frac calculation.
    if cobj.luv_l <= 0.0:
        xyz_x = 0.0
        xyz_y = 0.0
        xyz_z = 0.0
        return XYZColor(
            xyz_x, xyz_y, xyz_z, observer=cobj.observer, illuminant=cobj.illuminant)

    # Various variables used throughout the conversion.
    cie_k_times_e = color_constants.CIE_K * color_constants.CIE_E
    u_sub_0 = (4.0 * illum["X"]) / (illum["X"] + 15.0 * illum["Y"] + 3.0 * illum["Z"])
    v_sub_0 = (9.0 * illum["Y"]) / (illum["X"] + 15.0 * illum["Y"] + 3.0 * illum["Z"])
    var_u = cobj.luv_u / (13.0 * cobj.luv_l) + u_sub_0
    var_v = cobj.luv_v / (13.0 * cobj.luv_l) + v_sub_0

    # Y-coordinate calculations.
    if cobj.luv_l > cie_k_times_e:
        xyz_y = math.pow((cobj.luv_l + 16.0) / 116.0, 3.0)
    else:
        xyz_y = cobj.luv_l / color_constants.CIE_K

    # X-coordinate calculation.
    xyz_x = xyz_y * 9.0 * var_u / (4.0 * var_v)
    # Z-coordinate calculation.
    xyz_z = xyz_y * (12.0 - 3.0 * var_u - 20.0 * var_v) / (4.0 * var_v)

    return XYZColor(
        xyz_x, xyz_y, xyz_z, illuminant=cobj.illuminant, observer=cobj.observer)
python
{ "resource": "" }
q293
xyY_to_XYZ
train
def xyY_to_XYZ(cobj, *args, **kwargs):
    """
    Convert from xyY to XYZ.
    """
    # avoid division by zero
    if cobj.xyy_y == 0.0:
        xyz_x = 0.0
        xyz_y = 0.0
        xyz_z = 0.0
    else:
        xyz_x = (cobj.xyy_x * cobj.xyy_Y) / cobj.xyy_y
        xyz_y = cobj.xyy_Y
        xyz_z = ((1.0 - cobj.xyy_x - cobj.xyy_y) * xyz_y) / cobj.xyy_y

    return XYZColor(
        xyz_x, xyz_y, xyz_z, illuminant=cobj.illuminant, observer=cobj.observer)
python
{ "resource": "" }
q294
XYZ_to_xyY
train
def XYZ_to_xyY(cobj, *args, **kwargs):
    """
    Convert from XYZ to xyY.
    """
    xyz_sum = cobj.xyz_x + cobj.xyz_y + cobj.xyz_z
    # avoid division by zero
    if xyz_sum == 0.0:
        xyy_x = 0.0
        xyy_y = 0.0
    else:
        xyy_x = cobj.xyz_x / xyz_sum
        xyy_y = cobj.xyz_y / xyz_sum
    xyy_Y = cobj.xyz_y

    return xyYColor(
        xyy_x, xyy_y, xyy_Y, observer=cobj.observer, illuminant=cobj.illuminant)
python
{ "resource": "" }
q295
XYZ_to_Luv
train
def XYZ_to_Luv(cobj, *args, **kwargs):
    """
    Convert from XYZ to Luv
    """
    temp_x = cobj.xyz_x
    temp_y = cobj.xyz_y
    temp_z = cobj.xyz_z
    denom = temp_x + (15.0 * temp_y) + (3.0 * temp_z)
    # avoid division by zero
    if denom == 0.0:
        luv_u = 0.0
        luv_v = 0.0
    else:
        luv_u = (4.0 * temp_x) / denom
        luv_v = (9.0 * temp_y) / denom

    illum = cobj.get_illuminant_xyz()
    temp_y = temp_y / illum["Y"]
    if temp_y > color_constants.CIE_E:
        temp_y = math.pow(temp_y, (1.0 / 3.0))
    else:
        temp_y = (7.787 * temp_y) + (16.0 / 116.0)

    ref_U = (4.0 * illum["X"]) / (illum["X"] + (15.0 * illum["Y"]) + (3.0 * illum["Z"]))
    ref_V = (9.0 * illum["Y"]) / (illum["X"] + (15.0 * illum["Y"]) + (3.0 * illum["Z"]))

    luv_l = (116.0 * temp_y) - 16.0
    luv_u = 13.0 * luv_l * (luv_u - ref_U)
    luv_v = 13.0 * luv_l * (luv_v - ref_V)

    return LuvColor(
        luv_l, luv_u, luv_v, observer=cobj.observer, illuminant=cobj.illuminant)
python
{ "resource": "" }
q296
XYZ_to_Lab
train
def XYZ_to_Lab(cobj, *args, **kwargs):
    """
    Converts XYZ to Lab.
    """
    illum = cobj.get_illuminant_xyz()
    temp_x = cobj.xyz_x / illum["X"]
    temp_y = cobj.xyz_y / illum["Y"]
    temp_z = cobj.xyz_z / illum["Z"]

    if temp_x > color_constants.CIE_E:
        temp_x = math.pow(temp_x, (1.0 / 3.0))
    else:
        temp_x = (7.787 * temp_x) + (16.0 / 116.0)

    if temp_y > color_constants.CIE_E:
        temp_y = math.pow(temp_y, (1.0 / 3.0))
    else:
        temp_y = (7.787 * temp_y) + (16.0 / 116.0)

    if temp_z > color_constants.CIE_E:
        temp_z = math.pow(temp_z, (1.0 / 3.0))
    else:
        temp_z = (7.787 * temp_z) + (16.0 / 116.0)

    lab_l = (116.0 * temp_y) - 16.0
    lab_a = 500.0 * (temp_x - temp_y)
    lab_b = 200.0 * (temp_y - temp_z)
    return LabColor(
        lab_l, lab_a, lab_b, observer=cobj.observer, illuminant=cobj.illuminant)
python
{ "resource": "" }
q297
XYZ_to_RGB
train
def XYZ_to_RGB(cobj, target_rgb, *args, **kwargs):
    """
    XYZ to RGB conversion.
    """
    temp_X = cobj.xyz_x
    temp_Y = cobj.xyz_y
    temp_Z = cobj.xyz_z

    logger.debug("  - Target RGB space: %s", target_rgb)
    target_illum = target_rgb.native_illuminant
    logger.debug("  - Target native illuminant: %s", target_illum)
    logger.debug("  - XYZ color's illuminant: %s", cobj.illuminant)

    # If the XYZ values were taken with a different reference white than the
    # native reference white of the target RGB space, a transformation matrix
    # must be applied.
    if cobj.illuminant != target_illum:
        logger.debug("  * Applying transformation from %s to %s ",
                     cobj.illuminant, target_illum)
        # Get the adjusted XYZ values, adapted for the target illuminant.
        temp_X, temp_Y, temp_Z = apply_chromatic_adaptation(
            temp_X, temp_Y, temp_Z,
            orig_illum=cobj.illuminant, targ_illum=target_illum)
        logger.debug("  * New values: %.3f, %.3f, %.3f",
                     temp_X, temp_Y, temp_Z)

    # Apply an RGB working space matrix to the XYZ values (matrix mul).
    rgb_r, rgb_g, rgb_b = apply_RGB_matrix(
        temp_X, temp_Y, temp_Z, rgb_type=target_rgb, convtype="xyz_to_rgb")

    # Linear channel values ("v" in the sRGB spec).
    linear_channels = dict(r=rgb_r, g=rgb_g, b=rgb_b)
    # Gamma-companded channel values ("V" in the sRGB spec).
    nonlinear_channels = {}
    if target_rgb == sRGBColor:
        for channel in ['r', 'g', 'b']:
            v = linear_channels[channel]
            if v <= 0.0031308:
                nonlinear_channels[channel] = v * 12.92
            else:
                nonlinear_channels[channel] = 1.055 * math.pow(v, 1 / 2.4) - 0.055
    elif target_rgb == BT2020Color:
        if kwargs.get('is_12_bits_system'):
            a, b = 1.0993, 0.0181
        else:
            a, b = 1.099, 0.018
        for channel in ['r', 'g', 'b']:
            v = linear_channels[channel]
            if v < b:
                nonlinear_channels[channel] = v * 4.5
            else:
                nonlinear_channels[channel] = a * math.pow(v, 0.45) - (a - 1)
    else:
        # If it's not sRGB or BT.2020, apply the simple gamma exponent.
        for channel in ['r', 'g', 'b']:
            v = linear_channels[channel]
            nonlinear_channels[channel] = math.pow(v, 1 / target_rgb.rgb_gamma)

    return target_rgb(
        nonlinear_channels['r'], nonlinear_channels['g'], nonlinear_channels['b'])
python
{ "resource": "" }
q298
RGB_to_XYZ
train
def RGB_to_XYZ(cobj, target_illuminant=None, *args, **kwargs):
    """
    RGB to XYZ conversion. Expects RGB values in the 0-1 range (construct
    the RGB object with is_upscaled=True to pass 0-255 values).

    Based off of: http://www.brucelindbloom.com/index.html?Eqn_RGB_to_XYZ.html
    """
    # Will contain linearized RGB channels (removed the gamma func).
    linear_channels = {}

    if isinstance(cobj, sRGBColor):
        for channel in ['r', 'g', 'b']:
            V = getattr(cobj, 'rgb_' + channel)
            if V <= 0.04045:
                linear_channels[channel] = V / 12.92
            else:
                linear_channels[channel] = math.pow((V + 0.055) / 1.055, 2.4)
    elif isinstance(cobj, BT2020Color):
        if kwargs.get('is_12_bits_system'):
            a, b, c = 1.0993, 0.0181, 0.081697877417347
        else:
            a, b, c = 1.099, 0.018, 0.08124794403514049
        for channel in ['r', 'g', 'b']:
            V = getattr(cobj, 'rgb_' + channel)
            if V <= c:
                linear_channels[channel] = V / 4.5
            else:
                linear_channels[channel] = math.pow((V + (a - 1)) / a, 1 / 0.45)
    else:
        # If it's not sRGB or BT.2020, apply the simple gamma exponent.
        gamma = cobj.rgb_gamma
        for channel in ['r', 'g', 'b']:
            V = getattr(cobj, 'rgb_' + channel)
            linear_channels[channel] = math.pow(V, gamma)

    # Apply an RGB working space matrix to the XYZ values (matrix mul).
    xyz_x, xyz_y, xyz_z = apply_RGB_matrix(
        linear_channels['r'], linear_channels['g'], linear_channels['b'],
        rgb_type=cobj, convtype="rgb_to_xyz")

    if target_illuminant is None:
        target_illuminant = cobj.native_illuminant

    # The illuminant of the original RGB object. This will always match
    # the RGB colorspace's native illuminant.
    illuminant = cobj.native_illuminant
    xyzcolor = XYZColor(xyz_x, xyz_y, xyz_z, illuminant=illuminant)
    # This will take care of any illuminant changes for us (if source
    # illuminant != target illuminant).
    xyzcolor.apply_adaptation(target_illuminant)

    return xyzcolor
python
{ "resource": "" }
q299
RGB_to_HSV
train
def RGB_to_HSV(cobj, *args, **kwargs):
    """
    Converts from RGB to HSV.

    H values are in degrees and are 0 to 360.
    S values are a percentage, 0.0 to 1.0.
    V values are a percentage, 0.0 to 1.0.
    """
    var_R = cobj.rgb_r
    var_G = cobj.rgb_g
    var_B = cobj.rgb_b

    var_max = max(var_R, var_G, var_B)
    var_min = min(var_R, var_G, var_B)

    var_H = __RGB_to_Hue(var_R, var_G, var_B, var_min, var_max)

    if var_max == 0:
        var_S = 0
    else:
        var_S = 1.0 - (var_min / var_max)

    var_V = var_max

    return HSVColor(var_H, var_S, var_V)
python
{ "resource": "" }