code
stringlengths 114
1.05M
| path
stringlengths 3
312
| quality_prob
float64 0.5
0.99
| learning_prob
float64 0.2
1
| filename
stringlengths 3
168
| kind
stringclasses 1
value |
---|---|---|---|---|---|
import re
from typing import List, Set, FrozenSet, Callable # noqa: ignore=F401
from typing import Iterable, Tuple, Dict # noqa: ignore=F401
from libscanbuild import shell_split, run_command
__all__ = ['get_version', 'get_arguments', 'get_checkers']
# regex for activated checker
ACTIVE_CHECKER_PATTERN = re.compile(r'^-analyzer-checker=(.*)$')
def get_version(clang):
# type: (str) -> str
""" Returns the compiler version as string.
:param clang: the compiler we are using
:return: the version string printed to stderr """
output = run_command([clang, '-v'])
# the relevant version info is in the first line
return output[0]
def get_arguments(command, cwd):
# type: (List[str], str) -> List[str]
""" Capture Clang invocation.
:param command: the compilation command
:param cwd: the current working directory
:return: the detailed front-end invocation command """
cmd = command[:]
cmd.insert(1, '-###')
output = run_command(cmd, cwd=cwd)
# The relevant information is in the last line of the output.
# Don't check if finding last line fails, would throw exception anyway.
last_line = output[-1]
if re.search(r'clang(.*): error:', last_line):
raise Exception(last_line)
return shell_split(last_line)
def get_active_checkers(clang, plugins):
# type: (str, List[str]) -> FrozenSet[str]
""" Get the active checker list.
:param clang: the compiler we are using
:param plugins: list of plugins which was requested by the user
:return: list of checker names which are active
To get the default checkers we execute Clang to print how this
compilation would be called. And take out the enabled checker from the
arguments. For input file we specify stdin and pass only language
information. """
def get_active_checkers_for(language):
# type: (str) -> List[str]
""" Returns a list of active checkers for the given language. """
load_args = [arg
for plugin in plugins
for arg in ['-Xclang', '-load', '-Xclang', plugin]]
cmd = [clang, '--analyze'] + load_args + ['-x', language, '-']
return [candidate.group(1)
for candidate in (ACTIVE_CHECKER_PATTERN.match(arg)
for arg in get_arguments(cmd, '.'))
if candidate]
result = set() # type: Set[str]
for language in ['c', 'c++', 'objective-c', 'objective-c++']:
result.update(get_active_checkers_for(language))
return frozenset(result)
def is_active(checkers):
# type: (Iterable[str]) -> Callable[[str], bool]
""" Returns a method, which classifies the checker active or not,
based on the received checker name list. """
def predicate(checker):
# type: (str) -> bool
""" Returns True if the given checker is active. """
return any(pattern.match(checker) for pattern in patterns)
patterns = [re.compile(r'^' + a + r'(\.|$)') for a in checkers]
return predicate
def parse_checkers(stream):
# type: (List[str]) -> Iterable[Tuple[str, str]]
""" Parse clang -analyzer-checker-help output.
Below the line 'CHECKERS:' are there the name description pairs.
Many of them are in one line, but some long named checker has the
name and the description in separate lines.
The checker name is always prefixed with two space character. The
name contains no whitespaces. Then followed by newline (if it's
too long) or other space characters comes the description of the
checker. The description ends with a newline character.
:param stream: list of lines to parse
:return: generator of tuples
(<checker name>, <checker description>) """
lines = iter(stream)
# find checkers header
for line in lines:
if re.match(r'^CHECKERS:', line):
break
# find entries
state = None
for line in lines:
if state and not re.match(r'^\s\s\S', line):
yield (state, line.strip())
state = None
elif re.match(r'^\s\s\S+$', line.rstrip()):
state = line.strip()
else:
pattern = re.compile(r'^\s\s(?P<key>\S*)\s*(?P<value>.*)')
match = pattern.match(line.rstrip())
if match:
current = match.groupdict()
yield (current['key'], current['value'])
def get_checkers(clang, plugins):
# type: (str, List[str]) -> Dict[str, Tuple[str, bool]]
""" Get all the available checkers from default and from the plugins.
:param clang: the compiler we are using
:param plugins: list of plugins which was requested by the user
:return: a dictionary of all available checkers and its status
{<checker name>: (<checker description>, <is active by default>)} """
load = [elem for plugin in plugins for elem in ['-load', plugin]]
cmd = [clang, '-cc1'] + load + ['-analyzer-checker-help']
lines = run_command(cmd)
is_active_checker = is_active(get_active_checkers(clang, plugins))
checkers = {
name: (description, is_active_checker(name))
for name, description in parse_checkers(lines)
}
if not checkers:
raise Exception('Could not query Clang for available checkers.')
return checkers
|
/scan-build-2.0.20.tar.gz/scan-build-2.0.20/libscanbuild/clang.py
| 0.786213 | 0.276211 |
clang.py
|
pypi
|
""" This module is a collection of methods commonly used in this project. """
import collections
import functools
import json
import logging
import os
import os.path
import re
import shlex
import subprocess
import sys
import pprint
from typing import List, Any, Dict, Callable # noqa: ignore=F401
ENVIRONMENT_KEY = 'INTERCEPT_BUILD'
Execution = collections.namedtuple('Execution', ['pid', 'cwd', 'cmd'])
def shell_split(string):
# type: (str) -> List[str]
""" Takes a command string and returns as a list. """
def unescape(arg):
# type: (str) -> str
""" Gets rid of the escaping characters. """
if len(arg) >= 2 and arg[0] == arg[-1] and arg[0] == '"':
return re.sub(r'\\(["\\])', r'\1', arg[1:-1])
return re.sub(r'\\([\\ $%&\(\)\[\]\{\}\*|<>@?!])', r'\1', arg)
return [unescape(token) for token in shlex.split(string)]
def run_build(command, *args, **kwargs):
# type: (...) -> int
""" Run and report build command execution
:param command: list of tokens
:return: exit code of the process
"""
environment = kwargs.get('env', os.environ)
logging.debug('run build %s, in environment:\n%s',
command,
pprint.pformat(environment, indent=1, width=79))
exit_code = subprocess.call(command, *args, **kwargs)
logging.debug('build finished with exit code: %d', exit_code)
return exit_code
def run_command(command, cwd=None):
# type: (List[str], str) -> List[str]
""" Run a given command and report the execution.
:param command: array of tokens
:param cwd: the working directory where the command will be executed
:return: output of the command
"""
def decode_when_needed(result):
# type: (Any) -> str
""" check_output returns bytes or string depend on python version """
if not isinstance(result, str):
return result.decode('utf-8')
return result
try:
directory = os.path.abspath(cwd) if cwd else os.getcwd()
logging.debug('exec command %s in %s', command, directory)
output = subprocess.check_output(command,
cwd=directory,
stderr=subprocess.STDOUT)
return decode_when_needed(output).splitlines()
except subprocess.CalledProcessError as ex:
ex.output = decode_when_needed(ex.output).splitlines()
raise ex
def reconfigure_logging(verbose_level):
""" Reconfigure logging level and format based on the verbose flag.
:param verbose_level: number of `-v` flags received by the command
:return: no return value
"""
# exit when nothing to do
if verbose_level == 0:
return
root = logging.getLogger()
# tune level
level = logging.WARNING - min(logging.WARNING, (10 * verbose_level))
root.setLevel(level)
# be verbose with messages
if verbose_level <= 3:
fmt_string = '%(name)s: %(levelname)s: %(message)s'
else:
fmt_string = '%(name)s: %(levelname)s: %(funcName)s: %(message)s'
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(fmt=fmt_string))
root.handlers = [handler]
def command_entry_point(function):
# type: (Callable[[], int]) -> Callable[[], int]
""" Decorator for command entry methods.
The decorator initialize/shutdown logging and guard on programming
errors (catch exceptions).
The decorated method can have arbitrary parameters, the return value will
be the exit code of the process. """
@functools.wraps(function)
def wrapper():
# type: () -> int
""" Do housekeeping tasks and execute the wrapped method. """
try:
logging.basicConfig(format='%(name)s: %(message)s',
level=logging.WARNING,
stream=sys.stdout)
# this hack to get the executable name as %(name)
logging.getLogger().name = os.path.basename(sys.argv[0])
return function()
except KeyboardInterrupt:
logging.warning('Keyboard interrupt')
return 130 # signal received exit code for bash
except (OSError, subprocess.CalledProcessError):
logging.exception('Internal error.')
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.error("Please report this bug and attach the output "
"to the bug report")
else:
logging.error("Please run this command again and turn on "
"verbose mode (add '-vvvv' as argument).")
return 64 # some non used exit code for internal errors
finally:
logging.shutdown()
return wrapper
def wrapper_entry_point(function):
# type: (Callable[[int, Execution], None]) -> Callable[[], int]
""" Implements compiler wrapper base functionality.
A compiler wrapper executes the real compiler, then implement some
functionality, then returns with the real compiler exit code.
:param function: the extra functionality what the wrapper want to
do on top of the compiler call. If it throws exception, it will be
caught and logged.
:return: the exit code of the real compiler.
The :param function: will receive the following arguments:
:result: the exit code of the compilation.
:execution: the command executed by the wrapper. """
def is_cxx_wrapper():
# type: () -> bool
""" Find out was it a C++ compiler call. Compiler wrapper names
contain the compiler type. C++ compiler wrappers ends with `c++`,
but might have `.exe` extension on windows. """
wrapper_command = os.path.basename(sys.argv[0])
return True if re.match(r'(.+)c\+\+(.*)', wrapper_command) else False
def run_compiler(executable):
# type: (List[str]) -> int
""" Execute compilation with the real compiler. """
command = executable + sys.argv[1:]
logging.debug('compilation: %s', command)
result = subprocess.call(command)
logging.debug('compilation exit code: %d', result)
return result
@functools.wraps(function)
def wrapper():
# type: () -> int
""" It executes the compilation and calls the wrapped method. """
# get relevant parameters from environment
parameters = json.loads(os.environ[ENVIRONMENT_KEY])
reconfigure_logging(parameters['verbose'])
# execute the requested compilation and crash if anything goes wrong
cxx = is_cxx_wrapper()
compiler = parameters['cxx'] if cxx else parameters['cc']
result = run_compiler(compiler)
# call the wrapped method and ignore it's return value
try:
call = Execution(
pid=os.getpid(),
cwd=os.getcwd(),
cmd=['c++' if cxx else 'cc'] + sys.argv[1:])
function(result, call)
except (OSError, subprocess.CalledProcessError):
logging.exception('Compiler wrapper failed complete.')
# always return the real compiler exit code
return result
return wrapper
def wrapper_environment(args):
# type: (...) -> Dict[str, str]
""" Set up environment for interpose compiler wrapper."""
return {
ENVIRONMENT_KEY: json.dumps({
'verbose': args.verbose,
'cc': shell_split(args.cc),
'cxx': shell_split(args.cxx)
})
}
|
/scan-build-2.0.20.tar.gz/scan-build-2.0.20/libscanbuild/__init__.py
| 0.567937 | 0.195978 |
__init__.py
|
pypi
|
import requests
from datetime import datetime, timedelta, timezone
from pycountry import countries
from pathlib import Path
import json
def parse_tz(tzstr):
sign = -1 + (tzstr[0] == "+") * 2 # 1 if timezone is positive, -1 if timezone is negative
tzstr = tzstr[1:]
hour, minute = tzstr.split(":")
hour = int(hour) * sign
minute = int(minute) * sign
dt = timedelta(hours=hour, minutes=minute)
return timezone(dt)
def get_time(tzstr):
timeZone = parse_tz(tzstr)
curTime = datetime.now(tz=timeZone)
return curTime.hour, curTime.minute
def get_country_name(country):
return countries.get(alpha_2=country).name
class Locater:
def __init__(self, api_key):
self.api_key = api_key
self.url = "https://geo.ipify.org/api/v1"
self.directory = Path(__file__).parent
self.cache_path = self.directory / "geoip_cache.json"
self.cache = {}
self.load_cache()
self.api_cnt = 0
def load_cache(self):
if not self.cache_path.is_file():
with open(self.cache_path, 'w') as f:
json.dump(dict(), f)
with open(self.cache_path, "r") as f:
self.cache = json.load(f)
def store_cache(self):
with open(self.cache_path, 'w') as f:
json.dump(self.cache, f)
def locate(self, ip):
if ip in self.cache:
t = self.cache[ip]
country = t['country']
region = t['region']
timeZone = t['timezone']
hour, minute = get_time(timeZone)
else:
data = dict()
data['apiKey'] = self.api_key
data['ipAddress'] = ip
r = requests.get(self.url, params=data)
self.api_cnt += 1
res = r.json()
country = res['location']['country']
region = res['location']['region']
timeZone = res['location']['timezone']
hour, minute = get_time(timeZone)
self.cache[ip] = dict()
self.cache[ip]['country'] = country
self.cache[ip]['region'] = region
self.cache[ip]['timezone'] = timeZone
self.store_cache()
return get_country_name(country), region, hour, minute
|
/scan_for_webcams-1.4.3-py3-none-any.whl/scanforwebcams/geoip.py
| 0.415847 | 0.164752 |
geoip.py
|
pypi
|
Scan Image Slicer
---
Scan image slicer is a tool for detecting and separating images from a scanned image (or any image). The designed use case is for digitizing old photos, paper clippings and the like.
### Workflow
- create an empty input folder for the scanned images
- (optional) create a folder structure inside the input folder that is then used for naming and organizing the output images
- create an empty output folder (the folder structure will be mirrored here from the input folder)
- scan as many images as you like and place/organize them inside the input folder
- configure the tool using the config file and command line arguments
- slice and dice
### Installation using pip
```
pip install scan-image-slicer
```
### Requirements with pip install
- Python 3.6+
- pip
Using the tool
---
```
scan-image-slicer [options]
```
The easiest way to use the tool is to first edit the default config file and then override the settings when needed with command line arguments, which are explained below.
- **you need to run the tool once for the default config file to be created**
- location on Windows: Documents -> ScanImageSlicer -> config.yaml
- location on Linux: ~/.config/ScanImageSlicer/config.yaml
- location on MacOS: ~/Library/Application Support/ScanImageSlicer/config.yaml
- location on Other: ~/.ScanImageSlicer/config.yaml
### Help
- `-h`, `--help` Display help
### Quickstart
- `-skip`, `--skip-confirm` skip the confirm start question
### Modes
Only one mode can be used at a time.
- `-test`, `--test-mode`
Test mode shows you the scanned image with added colored rectangles that depict detected and ignored images.
- blue rectangles are images that would be saved
- purple rectangles are images that would be ignored
- `-pre`, `--preview-mode`
Preview mode shows you the individual sliced images one by one but does not save them unless you press the S key. Toggle filters with the F key.
- `-slice`, `--slice-mode`
Slice mode slices each scanned image on the task list and saves the slices to the output folder. Use it after you have finalized your settings.
### Paths
- `-i PATH`, `--input PATH` input folder for the scanned images
- `-o PATH`, `--output PATH` output folder for the sliced images
- `-c FILE`, `--config FILE` path to the custom config file
### Image detection & viewing
- `-white 0..255`, `--white-threshold 0..255` _(default is 230)_
The white threshold value is used for image detection. Tweak it to match your scanner's background white color (the space between images). If possible, bump up your gamma value slightly in your scanning software for easier image detection.
- `-min N`, `--minimum-size N` _(default is 3)_
Minimum size is used to discard too small images that are likely false positives. The value is in % of the total scanned image area.
- `-view N`, `--view-height N` _(default is 800)_
The height of the image inside the image viewer (won't be upscaled).
### Scaling
- `-scaleF N`, `--scale-factor N` scale sliced image using a scale factor value
- `-scaleW N`, `--scale-width N` scale sliced image using a new width value
- `-scaleH N`, `--scale-height N` scale sliced image using a new height value
Only one method of scaling can be used at a time. All methods preserve the aspect ratio. A value of zero means the option is disabled.
### Filters & fixes
- `-filtD 0..5`, `--filter-denoise 0..5` _(default is 1)_
Remove scanner noise from the sliced image. Higher values take more time. A value of zero means the option is disabled.
- `-filtB N > 1.0`, `--filter-brightness N > 1.0` _(default is 1.0)_
Add brightness to the sliced image with values above 1.0.
- `-filtC N > 1.0`, `--filter-contrast N > 1.0` _(default is 1.0)_
Add contrast to the sliced image with values above 1.0.
- `-filtG N > 1.0`, `--filter-gamma N > 1.0` _(default is 1.0)_
Add gamma correction to the sliced image with values above 1.0.
- `-pfix 0..89`, `--perspective-fix 0..89` _(default is 0)_
Add perspective correction to the sliced image. The given value is the maximum allowed 'tilt' of the sliced image in degrees. A value of zero means the option is disabled.
### File format
- `-save JPEG|PNG|WEBP`, `--save-format JPEG|PNG|WEBP` _(default is PNG)_
- `-png 0..9`, `--png-compression 0..9` _(default is 3)_
Note: higher compression levels take more time per sliced image.
- `-jpeg 0..100`, `--jpeg-quality 0..100` _(default is 95)_
- `-webp 1..101`, `--webp-quality 1..101` _(default is 90)_
Note: quality level of 101 is lossless.
### List information
- `-listS`, `--list-scans` list scanned images name and ID
- `-listF`, `--list-file` save scanned images name and ID as a text file at default config dir
- `-listT`, `--list-tasks` list added tasks
- `-listC`, `--list-cmds` list given commands
### Add/Remove images to/from the task list
- `-addA`, `--add-all` add all scanned images
- `-addID [ID ...]`, `--add-id [ID ...]` add scanned images using IDs eg. 1 2 3
- `-addN N`, `--add-new N` add N amount of newest scanned images by creation time (ctime)
- `-addO N`, `--add-old N` add N amount of oldest scanned images by creation time (ctime)
- `-addR N`, `--add-random N` add N amount of random scanned images
- `-remID [ID ...]`, `--remove-id [ID ...]` remove scanned images using IDs eg. 1 2 3
### Infobar
- `-info`, `--show-infobar` show infobar on preview mode
- `-fontS N`, `--font-scale N` infobar font scale _(default is 1.0)_
|
/scan-image-slicer-1.3.0.tar.gz/scan-image-slicer-1.3.0/README.md
| 0.651022 | 0.930458 |
README.md
|
pypi
|
from typing import Dict, List, TypedDict, Union
APPEND_CREDIT_CARD_DEFAULT = False
"""Default value of the field path 'Arguments append_credit_card'"""
ASSISTED_SPLIT_DEFAULT = False
"""Default value of the field path 'Arguments assisted_split'"""
AUTO_BASH_DEFAULT = False
"""Default value of the field path 'Mode auto_bash'"""
AUTO_LEVEL_DEFAULT = False
"""Default value of the field path 'Arguments auto_level'"""
class Arguments(TypedDict, total=False):
"""Arguments."""
level: Union[bool, int]
"""
Level.
true: => do level on 15% - 85% (under 15 % will be black above 85% will be white), false: => 0% - 100%, <number>: => (0 + <number>)% - (100 - number)%
default: False
"""
auto_level: bool
"""
Auto level.
If no level specified, do auto level
default: False
"""
min_level: Union[int, float]
"""
Min level.
Min level if no level end no auto-level
default: 0
"""
max_level: Union[int, float]
"""
Max level.
Max level if no level end no auto-level
default: 100
"""
cut_white: Union[int, float]
"""
Cut white.
Set the near white pixels on the image to white
default: 255
"""
cut_black: Union[int, float]
"""
Cut black.
Set the near black pixels on the image to black
default: 0
"""
no_crop: bool
"""
No crop.
Don't do any crop
default: False
"""
margin_horizontal: Union[int, float]
"""
Margin horizontal.
The horizontal margin used on auto-detect content [mm]
default: 9
"""
margin_vertical: Union[int, float]
"""
Margin vertical.
The vertical margin used on auto-detect content [mm]
default: 6
"""
dpi: Union[int, float]
"""
Dpi.
The DPI used to convert the mm to pixel
default: 300
"""
sharpen: bool
"""
Sharpen.
Do the sharpen
default: False
"""
dither: bool
"""
Dither.
Do the dither
default: False
"""
tesseract: bool
"""
Tesseract.
Use tesseract to to an OCR on the document
default: True
"""
tesseract_lang: str
"""
Tesseract lang.
The used language for tesseract
default: fra+eng
"""
append_credit_card: bool
"""
Append credit card.
Do an assisted split
default: False
"""
assisted_split: bool
"""
Assisted split.
Do an assisted split
default: False
"""
min_box_size_crop: Union[int, float]
"""
Min box size crop.
The minimum box size to find the content on witch one we will crop [mm]
default: 3
"""
min_box_black_crop: Union[int, float]
"""
Min box black crop.
The minimum black in a box on content find on witch one we will crop [%]
default: 2
"""
contour_kernel_size_crop: Union[int, float]
"""
Contour kernel size crop.
The block size used in a box on content find on witch one we will crop [mm]
default: 1.5
"""
threshold_block_size_crop: Union[int, float]
"""
Threshold block size crop.
The block size used in a box on threshold for content find on witch one we will crop [mm]
default: 1.5
"""
threshold_value_c_crop: Union[int, float]
"""
Threshold value c crop.
A variable used on threshold, should be low on low contrast image, used in a box on content find on witch one we will crop
default: 70
"""
min_box_size_empty: Union[int, float]
"""
Min box size empty.
The minimum box size to find the content to determine if the page is empty [mm]
default: 10
"""
min_box_black_empty: Union[int, float]
"""
Min box black empty.
The minimum black in a box on content find if the page is empty [%]
default: 2
"""
contour_kernel_size_empty: Union[int, float]
"""
Contour kernel size empty.
The block size used in a box on content find if the page is empty [mm]
default: 1.5
"""
threshold_block_size_empty: Union[int, float]
"""
Threshold block size empty.
The block size used in a box on threshold for content find if the page is empty [mm]
default: 1.5
"""
threshold_value_c_empty: Union[int, float]
"""
Threshold value c empty.
A variable used on threshold, should be low on low contrast image, used in a box on content find if the page is empty
default: 70
"""
min_box_size_limit: Union[int, float]
"""
Min box size limit.
The minimum box size to find the limits based on content [mm]
default: 10
"""
min_box_black_limit: Union[int, float]
"""
Min box black limit.
The minimum black in a box on content find the limits based on content [%]
default: 2
"""
contour_kernel_size_limit: Union[int, float]
"""
Contour kernel size limit.
The block size used in a box on content find the limits based on content [mm]
default: 1.5
"""
threshold_block_size_limit: Union[int, float]
"""
Threshold block size limit.
The block size used in a box on threshold for content find the limits based on content [mm]
default: 1.5
"""
threshold_value_c_limit: Union[int, float]
"""
Threshold value c limit.
A variable used on threshold, should be low on low contrast image, used in a box on content find the limits based on content
default: 70
"""
colors: int
"""
Colors.
The number of colors in the png
default: 0
"""
run_optipng: bool
"""
Run optipng.
Run the optipng optimizer
default: True
"""
run_pngquant: bool
"""
Run pngquant.
Run the pngquant optimizer
default: False
"""
pngquant_options: List[str]
"""
Pngquant options.
The pngquant options
default:
- --force
- --speed=1
- --strip
- --quality=0-32
"""
run_exiftool: bool
"""
Run exiftool.
Run the exiftool optimizer
default: False
"""
run_ps2pdf: bool
"""
Run ps2pdf.
Run the ps2pdf optimizer (=> JPEG)
default: False
"""
no_auto_rotate: bool
"""
No auto rotate.
Run the auto rotate detected by Tesseract
default: False
"""
jpeg: bool
"""
Jpeg.
Convert images to JPEG
default: False
"""
jpeg_quality: int
"""
Jpeg quality.
The JPEG quality
default: 90
"""
background_color: List[int]
"""
Background color.
The background color
default:
- 255
- 255
- 255
"""
auto_mask: "AutoMask"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
auto_cut: "AutoMask"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
deskew: "_ArgumentsDeskew"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
line_detection: "LineDetection"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
rule: "Rule"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
class AutoMask(TypedDict, total=False):
"""
Auto mask.
The auto mask configuration, the mask is used to mask the image on crop and skew calculation
"""
lower_hsv_color: List[int]
"""
Lower hsv color.
The lower color in HSV representation
default:
- 0
- 0
- 250
"""
upper_hsv_color: List[int]
"""
Upper hsv color.
The upper color in HSV representation
default:
- 255
- 10
- 255
"""
de_noise_morphology: bool
"""
De noise morphology.
Apply a morphology operation to remove noise
default: True
"""
inverse_mask: bool
"""
Inverse mask.
Inverse the mask
default: False
"""
de_noise_size: int
"""
De noise size.
The size of the artifact that will be de noise
default: 1000
"""
de_noise_level: int
"""
De noise level.
The threshold level used in de noise on the blurry image
default: 220
"""
buffer_size: int
"""
Buffer size.
The size of the buffer add on the mask
default: 20
"""
buffer_level: int
"""
Buffer level.
The threshold level used in buffer on the blurry image
default: 20
"""
additional_filename: str
"""An image file used to add on the mask"""
BACKGROUND_COLOR_DEFAULT = [255, 255, 255]
"""Default value of the field path 'Arguments background_color'"""
BUFFER_LEVEL_DEFAULT = 20
"""Default value of the field path 'Auto mask buffer_level'"""
BUFFER_SIZE_DEFAULT = 20
"""Default value of the field path 'Auto mask buffer_size'"""
COLORS_DEFAULT = 0
"""Default value of the field path 'Arguments colors'"""
CONTOUR_KERNEL_SIZE_CROP_DEFAULT = 1.5
"""Default value of the field path 'Arguments contour_kernel_size_crop'"""
CONTOUR_KERNEL_SIZE_EMPTY_DEFAULT = 1.5
"""Default value of the field path 'Arguments contour_kernel_size_empty'"""
CONTOUR_KERNEL_SIZE_LIMIT_DEFAULT = 1.5
"""Default value of the field path 'Arguments contour_kernel_size_limit'"""
CUT_BLACK_DEFAULT = 0
"""Default value of the field path 'Arguments cut_black'"""
CUT_WHITE_DEFAULT = 255
"""Default value of the field path 'Arguments cut_white'"""
class Configuration(TypedDict, total=False):
"""Configuration."""
extends: str
"""The configuration to extends"""
merge_strategies: "MergeStrategies"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
scan_folder: str
"""This should be shared with the process container in 'source'."""
scanimage: str
"""
Scanimage.
The scanimage command
default: scanimage
"""
scanimage_arguments: List[str]
"""
Scanimage arguments.
The scanimage arguments
default:
- --format=png
- --mode=color
- --resolution=300
"""
extension: str
"""
Extension.
The extension of generate image (png or tiff)
default: png
"""
default_args: "Arguments"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
viewer: str
"""
Viewer.
The command used to start the viewer
default: eog
"""
modes: Dict[str, "Mode"]
"""
Modes.
Customize the modes
default:
adf:
scanimage_arguments:
- --source=ADF
double:
auto_bash: true
rotate_even: true
scanimage_arguments:
- --source=ADF
multi:
scanimage_arguments:
- --batch-prompt
one:
scanimage_arguments:
- --batch-count=1
"""
DESKEW_ANGLE_DERIVATION_DEFAULT = 0.1
"""Default value of the field path 'Arguments deskew angle_derivation'"""
DESKEW_ANGLE_PM_90_DEFAULT = False
"""Default value of the field path 'Arguments deskew angle_pm_90'"""
DESKEW_MAX_ANGLE_DEFAULT = 45
"""Default value of the field path 'Arguments deskew max_angle'"""
DESKEW_MIN_ANGLE_DEFAULT = -45
"""Default value of the field path 'Arguments deskew min_angle'"""
DESKEW_NUM_PEAKS_DEFAULT = 20
"""Default value of the field path 'Arguments deskew num_peaks'"""
DESKEW_SIGMA_DEFAULT = 3.0
"""Default value of the field path 'Arguments deskew sigma'"""
DE_NOISE_LEVEL_DEFAULT = 220
"""Default value of the field path 'Auto mask de_noise_level'"""
DE_NOISE_MORPHOLOGY_DEFAULT = True
"""Default value of the field path 'Auto mask de_noise_morphology'"""
DE_NOISE_SIZE_DEFAULT = 1000
"""Default value of the field path 'Auto mask de_noise_size'"""
DICT_DEFAULT = ["merge"]
"""Default value of the field path 'Merge strategies dict'"""
DITHER_DEFAULT = False
"""Default value of the field path 'Arguments dither'"""
DPI_DEFAULT = 300
"""Default value of the field path 'Arguments dpi'"""
EXTENSION_DEFAULT = "png"
"""Default value of the field path 'Configuration extension'"""
FALLBACK_DEFAULT = ["override"]
"""Default value of the field path 'Merge strategies fallback'"""
INVERSE_MASK_DEFAULT = False
"""Default value of the field path 'Auto mask inverse_mask'"""
JPEG_DEFAULT = False
"""Default value of the field path 'Arguments jpeg'"""
JPEG_QUALITY_DEFAULT = 90
"""Default value of the field path 'Arguments jpeg_quality'"""
LEVEL_DEFAULT = False
"""Default value of the field path 'Arguments level'"""
LINE_DETECTION_APERTURE_SIZE_DEFAULT = 3
"""Default value of the field path 'Line detection aperture_size'"""
LINE_DETECTION_HIGH_THRESHOLD_DEFAULT = 1000
"""Default value of the field path 'Line detection high_threshold'"""
LINE_DETECTION_LOW_THRESHOLD_DEFAULT = 0
"""Default value of the field path 'Line detection low_threshold'"""
LINE_DETECTION_MAX_LINE_GAP_DEFAULT = 100
"""Default value of the field path 'Line detection max_line_gap'"""
LINE_DETECTION_MIN_LINE_LENGTH_DEFAULT = 50
"""Default value of the field path 'Line detection min_line_length'"""
LINE_DETECTION_RHO_DEFAULT = 1
"""Default value of the field path 'Line detection rho'"""
LINE_DETECTION_THRESHOLD_DEFAULT = 100
"""Default value of the field path 'Line detection threshold'"""
LIST_DEFAULT = ["override"]
"""Default value of the field path 'Merge strategies list'"""
LOWER_HSV_COLOR_DEFAULT = [0, 0, 250]
"""Default value of the field path 'Auto mask lower_hsv_color'"""
class LineDetection(TypedDict, total=False):
"""
Line detection.
The line detection used in assisted split
"""
low_threshold: int
"""
Line detection low threshold.
The low threshold used in the Canny edge detector
default: 0
"""
high_threshold: int
"""
Line detection high threshold.
The high threshold used in the Canny edge detector
default: 1000
"""
aperture_size: int
"""
Line detection aperture size.
The aperture size used in the Canny edge detector
default: 3
"""
rho: int
"""
Line detection rho.
The rho used in the Hough transform
default: 1
"""
threshold: int
"""
Line detection threshold.
The threshold used in the Hough transform
default: 100
"""
min_line_length: int
"""
Line detection min line length.
The minimum line length in percentage of the image size used in the Hough transform
default: 50
"""
max_line_gap: int
"""
Line detection max line gap.
The maximum line gap in percentage of the image size used in the Hough transform
default: 100
"""
MARGIN_HORIZONTAL_DEFAULT = 9
"""Default value of the field path 'Arguments margin_horizontal'"""
MARGIN_VERTICAL_DEFAULT = 6
"""Default value of the field path 'Arguments margin_vertical'"""
MAX_LEVEL_DEFAULT = 100
"""Default value of the field path 'Arguments max_level'"""
MIN_BOX_BLACK_CROP_DEFAULT = 2
"""Default value of the field path 'Arguments min_box_black_crop'"""
MIN_BOX_BLACK_EMPTY_DEFAULT = 2
"""Default value of the field path 'Arguments min_box_black_empty'"""
MIN_BOX_BLACK_LIMIT_DEFAULT = 2
"""Default value of the field path 'Arguments min_box_black_limit'"""
MIN_BOX_SIZE_CROP_DEFAULT = 3
"""Default value of the field path 'Arguments min_box_size_crop'"""
MIN_BOX_SIZE_EMPTY_DEFAULT = 10
"""Default value of the field path 'Arguments min_box_size_empty'"""
MIN_BOX_SIZE_LIMIT_DEFAULT = 10
"""Default value of the field path 'Arguments min_box_size_limit'"""
MIN_LEVEL_DEFAULT = 0
"""Default value of the field path 'Arguments min_level'"""
MODES_DEFAULT = {
"adf": {"scanimage_arguments": ["--source=ADF"]},
"double": {"scanimage_arguments": ["--source=ADF"], "auto_bash": True, "rotate_even": True},
"multi": {"scanimage_arguments": ["--batch-prompt"]},
"one": {"scanimage_arguments": ["--batch-count=1"]},
}
"""Default value of the field path 'Configuration modes'"""
class MergeStrategies(TypedDict, total=False):
"""
Merge strategies.
The merge strategy to use, see https://deepmerge.readthedocs.io/en/latest/strategies.html#builtin-strategies
"""
list: List[str]
"""
List.
The merge strategy to use on list
default:
- override
"""
dict: List[str]
"""
Dict.
The merge strategy to use on dict
default:
- merge
"""
fallback: List[str]
"""
Fallback.
The fallback merge strategy
default:
- override
"""
type_conflict: List[str]
"""
Type conflict.
The type_conflict merge strategy
default:
- override
"""
class Mode(TypedDict, total=False):
"""Mode."""
scanimage_arguments: List[str]
"""Additional scanimage arguments"""
auto_bash: bool
"""
Auto bash.
Run the ADF in tow step odd and even, needed for scanner that don't support double face
default: False
"""
rotate_even: bool
"""
Rotate even.
Rotate the even pages, to use in conjunction with auto_bash
default: False
"""
NO_AUTO_ROTATE_DEFAULT = False
"""Default value of the field path 'Arguments no_auto_rotate'"""
NO_CROP_DEFAULT = False
"""Default value of the field path 'Arguments no_crop'"""
PNGQUANT_OPTIONS_DEFAULT = ["--force", "--speed=1", "--strip", "--quality=0-32"]
"""Default value of the field path 'Arguments pngquant_options'"""
ROTATE_EVEN_DEFAULT = False
"""Default value of the field path 'Mode rotate_even'"""
RULE_ENABLE_DEFAULT = True
"""Default value of the field path 'Rule enable'"""
RULE_GRADUATION_COLOR_DEFAULT = [0, 0, 0]
"""Default value of the field path 'Rule graduation_color'"""
RULE_GRADUATION_TEXT_FONT_COLOR_DEFAULT = [0, 0, 0]
"""Default value of the field path 'Rule graduation_text_font_color'"""
RULE_GRADUATION_TEXT_FONT_FILENAME_DEFAULT = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"
"""Default value of the field path 'Rule graduation_text_font_filename'"""
RULE_GRADUATION_TEXT_FONT_SIZE_DEFAULT = 17
"""Default value of the field path 'Rule graduation_text_font_size'"""
RULE_GRADUATION_TEXT_MARGIN_DEFAULT = 6
"""Default value of the field path 'Rule graduation_text_margin'"""
RULE_LINES_COLOR_DEFAULT = [0, 0, 0]
"""Default value of the field path 'Rule lines_color'"""
RULE_LINES_OPACITY_DEFAULT = 0.2
"""Default value of the field path 'Rule lines_opacity'"""
RULE_LINES_SPACE_DEFAULT = 100
"""Default value of the field path 'Rule lines_space'"""
RULE_MAJOR_GRADUATION_SIZE_DEFAULT = 30
"""Default value of the field path 'Rule major_graduation_size'"""
RULE_MAJOR_GRADUATION_SPACE_DEFAULT = 100
"""Default value of the field path 'Rule major_graduation_space'"""
RULE_MINOR_GRADUATION_SIZE_DEFAULT = 10
"""Default value of the field path 'Rule minor_graduation_size'"""
RULE_MINOR_GRADUATION_SPACE_DEFAULT = 10
"""Default value of the field path 'Rule minor_graduation_space'"""
RUN_EXIFTOOL_DEFAULT = False
"""Default value of the field path 'Arguments run_exiftool'"""
RUN_OPTIPNG_DEFAULT = True
"""Default value of the field path 'Arguments run_optipng'"""
RUN_PNGQUANT_DEFAULT = False
"""Default value of the field path 'Arguments run_pngquant'"""
RUN_PS2PDF_DEFAULT = False
"""Default value of the field path 'Arguments run_ps2pdf'"""
class Rule(TypedDict, total=False):
"""
Rule.
Configuration of rule displayed in assisted split images
"""
enable: bool
"""
Rule enable.
default: True
"""
minor_graduation_space: int
"""
Rule minor graduation space.
default: 10
"""
major_graduation_space: int
"""
Rule major graduation space.
default: 100
"""
lines_space: int
"""
Rule lines space.
default: 100
"""
minor_graduation_size: int
"""
Rule minor graduation size.
default: 10
"""
major_graduation_size: int
"""
Rule major graduation size.
default: 30
"""
graduation_color: List[int]
"""
Rule graduation color.
default:
- 0
- 0
- 0
"""
lines_color: List[int]
"""
Rule lines color.
default:
- 0
- 0
- 0
"""
lines_opacity: Union[int, float]
"""
Rule lines opacity.
default: 0.2
"""
graduation_text_font_filename: str
"""
Rule graduation text font filename.
default: /usr/share/fonts/truetype/dejavu/DejaVuSans.ttf
"""
graduation_text_font_size: Union[int, float]
"""
Rule graduation text font size.
default: 17
"""
graduation_text_font_color: List[int]
"""
Rule graduation text font color.
default:
- 0
- 0
- 0
"""
graduation_text_margin: int
"""
Rule graduation text margin.
default: 6
"""
SCANIMAGE_ARGUMENTS_DEFAULT = ["--format=png", "--mode=color", "--resolution=300"]
"""Default value of the field path 'Configuration scanimage_arguments'"""
SCANIMAGE_DEFAULT = "scanimage"
"""Default value of the field path 'Configuration scanimage'"""
SHARPEN_DEFAULT = False
"""Default value of the field path 'Arguments sharpen'"""
TESSERACT_DEFAULT = True
"""Default value of the field path 'Arguments tesseract'"""
TESSERACT_LANG_DEFAULT = "fra+eng"
"""Default value of the field path 'Arguments tesseract_lang'"""
THRESHOLD_BLOCK_SIZE_CROP_DEFAULT = 1.5
"""Default value of the field path 'Arguments threshold_block_size_crop'"""
THRESHOLD_BLOCK_SIZE_EMPTY_DEFAULT = 1.5
"""Default value of the field path 'Arguments threshold_block_size_empty'"""
THRESHOLD_BLOCK_SIZE_LIMIT_DEFAULT = 1.5
"""Default value of the field path 'Arguments threshold_block_size_limit'"""
THRESHOLD_VALUE_C_CROP_DEFAULT = 70
"""Default value of the field path 'Arguments threshold_value_c_crop'"""
THRESHOLD_VALUE_C_EMPTY_DEFAULT = 70
"""Default value of the field path 'Arguments threshold_value_c_empty'"""
THRESHOLD_VALUE_C_LIMIT_DEFAULT = 70
"""Default value of the field path 'Arguments threshold_value_c_limit'"""
TYPE_CONFLICT_DEFAULT = ["override"]
"""Default value of the field path 'Merge strategies type_conflict'"""
UPPER_HSV_COLOR_DEFAULT = [255, 10, 255]
"""Default value of the field path 'Auto mask upper_hsv_color'"""
VIEWER_DEFAULT = "eog"
"""Default value of the field path 'Configuration viewer'"""
class _ArgumentsDeskew(TypedDict, total=False):
"""The deskew configuration"""
min_angle: Union[int, float]
"""
Deskew min angle.
The minimum angle to detect the image skew [degree]
default: -45
"""
max_angle: Union[int, float]
"""
Deskew max angle.
The maximum angle to detect the image skew [degree]
default: 45
"""
angle_derivation: Union[int, float]
"""
Deskew angle derivation.
The step of angle to detect the image skew [degree]
default: 0.1
"""
sigma: Union[int, float]
"""
Deskew sigma.
Used in the `canny` function
default: 3.0
"""
num_peaks: int
"""
Deskew num peaks.
number of peaks we ask for
default: 20
"""
angle_pm_90: bool
"""
Deskew angle pm 90.
Detect an angle of +/- 90 degree, also +/- 45 degree
default: False
"""
|
/scan_to_paperless-1.25.0-py3-none-any.whl/scan_to_paperless/config.py
| 0.930734 | 0.675079 |
config.py
|
pypi
|
from typing import Dict, List, TypedDict, Union
APPEND_CREDIT_CARD_DEFAULT = False
"""Default value of the field path 'Arguments append_credit_card'"""
ASSISTED_SPLIT_DEFAULT = False
"""Default value of the field path 'Arguments assisted_split'"""
AUTO_LEVEL_DEFAULT = False
"""Default value of the field path 'Arguments auto_level'"""
class Arguments(TypedDict, total=False):
"""
Arguments.
Editor note: The properties of this object should be modified in the config_schema.json file
"""
level: Union[bool, int]
"""
Level.
true: => do level on 15% - 85% (under 15 % will be black above 85% will be white), false: => 0% - 100%, <number>: => (0 + <number>)% - (100 - number)%
default: False
"""
auto_level: bool
"""
Auto level.
If no level specified, do auto level
default: False
"""
min_level: Union[int, float]
"""
Min level.
Min level if no level end no auto-level
default: 0
"""
max_level: Union[int, float]
"""
Max level.
Max level if no level end no auto-level
default: 100
"""
cut_white: Union[int, float]
"""
Cut white.
Set the near white pixels on the image to white
default: 255
"""
cut_black: Union[int, float]
"""
Cut black.
Set the near black pixels on the image to black
default: 0
"""
no_crop: bool
"""
No crop.
Don't do any crop
default: False
"""
margin_horizontal: Union[int, float]
"""
Margin horizontal.
The horizontal margin used on auto-detect content [mm]
default: 9
"""
margin_vertical: Union[int, float]
"""
Margin vertical.
The vertical margin used on auto-detect content [mm]
default: 6
"""
dpi: Union[int, float]
"""
Dpi.
The DPI used to convert the mm to pixel
default: 300
"""
sharpen: bool
"""
Sharpen.
Do the sharpen
default: False
"""
dither: bool
"""
Dither.
Do the dither
default: False
"""
tesseract: bool
"""
Tesseract.
Use tesseract to to an OCR on the document
default: True
"""
tesseract_lang: str
"""
Tesseract lang.
The used language for tesseract
default: fra+eng
"""
append_credit_card: bool
"""
Append credit card.
Do an assisted split
default: False
"""
assisted_split: bool
"""
Assisted split.
Do an assisted split
default: False
"""
min_box_size_crop: Union[int, float]
"""
Min box size crop.
The minimum box size to find the content on witch one we will crop [mm]
default: 3
"""
min_box_black_crop: Union[int, float]
"""
Min box black crop.
The minimum black in a box on content find on witch one we will crop [%]
default: 2
"""
contour_kernel_size_crop: Union[int, float]
"""
Contour kernel size crop.
The block size used in a box on content find on witch one we will crop [mm]
default: 1.5
"""
threshold_block_size_crop: Union[int, float]
"""
Threshold block size crop.
The block size used in a box on threshold for content find on witch one we will crop [mm]
default: 1.5
"""
threshold_value_c_crop: Union[int, float]
"""
Threshold value c crop.
A variable used on threshold, should be low on low contrast image, used in a box on content find on witch one we will crop
default: 70
"""
min_box_size_empty: Union[int, float]
"""
Min box size empty.
The minimum box size to find the content to determine if the page is empty [mm]
default: 10
"""
min_box_black_empty: Union[int, float]
"""
Min box black empty.
The minimum black in a box on content find if the page is empty [%]
default: 2
"""
contour_kernel_size_empty: Union[int, float]
"""
Contour kernel size empty.
The block size used in a box on content find if the page is empty [mm]
default: 1.5
"""
threshold_block_size_empty: Union[int, float]
"""
Threshold block size empty.
The block size used in a box on threshold for content find if the page is empty [mm]
default: 1.5
"""
threshold_value_c_empty: Union[int, float]
"""
Threshold value c empty.
A variable used on threshold, should be low on low contrast image, used in a box on content find if the page is empty
default: 70
"""
min_box_size_limit: Union[int, float]
"""
Min box size limit.
The minimum box size to find the limits based on content [mm]
default: 10
"""
min_box_black_limit: Union[int, float]
"""
Min box black limit.
The minimum black in a box on content find the limits based on content [%]
default: 2
"""
contour_kernel_size_limit: Union[int, float]
"""
Contour kernel size limit.
The block size used in a box on content find the limits based on content [mm]
default: 1.5
"""
threshold_block_size_limit: Union[int, float]
"""
Threshold block size limit.
The block size used in a box on threshold for content find the limits based on content [mm]
default: 1.5
"""
threshold_value_c_limit: Union[int, float]
"""
Threshold value c limit.
A variable used on threshold, should be low on low contrast image, used in a box on content find the limits based on content
default: 70
"""
colors: int
"""
Colors.
The number of colors in the png
default: 0
"""
run_optipng: bool
"""
Run optipng.
Run the optipng optimizer
default: True
"""
run_pngquant: bool
"""
Run pngquant.
Run the pngquant optimizer
default: False
"""
pngquant_options: List[str]
"""
Pngquant options.
The pngquant options
default:
- --force
- --speed=1
- --strip
- --quality=0-32
"""
run_exiftool: bool
"""
Run exiftool.
Run the exiftool optimizer
default: False
"""
run_ps2pdf: bool
"""
Run ps2pdf.
Run the ps2pdf optimizer (=> JPEG)
default: False
"""
no_auto_rotate: bool
"""
No auto rotate.
Run the auto rotate detected by Tesseract
default: False
"""
jpeg: bool
"""
Jpeg.
Convert images to JPEG
default: False
"""
jpeg_quality: int
"""
Jpeg quality.
The JPEG quality
default: 90
"""
background_color: List[int]
"""
Background color.
The background color
default:
- 255
- 255
- 255
"""
auto_mask: "AutoMask"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
auto_cut: "AutoMask"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
deskew: "_ArgumentsDeskew"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
line_detection: "LineDetection"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
rule: "Rule"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
class AssistedSplit(TypedDict, total=False):
"""
Assisted split.
Assisted split configuration
"""
source: str
"""The source image name."""
destinations: List[Union[int, str]]
"""The destination image positions."""
image: str
"""The enhanced image name."""
limits: List["Limit"]
"""The (proposed) limits to do the assisted split, You should keep only the right one"""
class AutoMask(TypedDict, total=False):
"""
Auto mask.
The auto mask configuration, the mask is used to mask the image on crop and skew calculation
Editor note: The properties of this object should be modified in the config_schema.json file
"""
lower_hsv_color: List[int]
"""
Lower hsv color.
The lower color in HSV representation
default:
- 0
- 0
- 250
"""
upper_hsv_color: List[int]
"""
Upper hsv color.
The upper color in HSV representation
default:
- 255
- 10
- 255
"""
de_noise_morphology: bool
"""
De noise morphology.
Apply a morphology operation to remove noise
default: True
"""
inverse_mask: bool
"""
Inverse mask.
Inverse the mask
default: False
"""
de_noise_size: int
"""
De noise size.
The size of the artifact that will be de noise
default: 1000
"""
de_noise_level: int
"""
De noise level.
The threshold level used in de noise on the blurry image
default: 220
"""
buffer_size: int
"""
Buffer size.
The size of the buffer add on the mask
default: 20
"""
buffer_level: int
"""
Buffer level.
The threshold level used in buffer on the blurry image
default: 20
"""
additional_filename: str
"""An image file used to add on the mask"""
BACKGROUND_COLOR_DEFAULT = [255, 255, 255]
"""Default value of the field path 'Arguments background_color'"""
BUFFER_LEVEL_DEFAULT = 20
"""Default value of the field path 'Auto mask buffer_level'"""
BUFFER_SIZE_DEFAULT = 20
"""Default value of the field path 'Auto mask buffer_size'"""
COLORS_DEFAULT = 0
"""Default value of the field path 'Arguments colors'"""
CONTOUR_KERNEL_SIZE_CROP_DEFAULT = 1.5
"""Default value of the field path 'Arguments contour_kernel_size_crop'"""
CONTOUR_KERNEL_SIZE_EMPTY_DEFAULT = 1.5
"""Default value of the field path 'Arguments contour_kernel_size_empty'"""
CONTOUR_KERNEL_SIZE_LIMIT_DEFAULT = 1.5
"""Default value of the field path 'Arguments contour_kernel_size_limit'"""
CUT_BLACK_DEFAULT = 0
"""Default value of the field path 'Arguments cut_black'"""
CUT_WHITE_DEFAULT = 255
"""Default value of the field path 'Arguments cut_white'"""
class Configuration(TypedDict, total=False):
"""Configuration."""
images: List[str]
"""
The images
required
"""
args: "Arguments"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
required
"""
progress: bool
"""
Progress.
Run in progress mode
default: False
"""
steps: List["Step"]
"""The carried out steps description"""
assisted_split: List["AssistedSplit"]
transformed_images: List[str]
"""The transformed image, if removed the jobs will rag again from start"""
intermediate_error: List["IntermediateError"]
"""The ignored errors"""
images_config: Dict[str, "_ConfigurationImagesConfigAdditionalproperties"]
DESKEW_ANGLE_DERIVATION_DEFAULT = 0.1
"""Default value of the field path 'Arguments deskew angle_derivation'"""
DESKEW_ANGLE_PM_90_DEFAULT = False
"""Default value of the field path 'Arguments deskew angle_pm_90'"""
DESKEW_MAX_ANGLE_DEFAULT = 45
"""Default value of the field path 'Arguments deskew max_angle'"""
DESKEW_MIN_ANGLE_DEFAULT = -45
"""Default value of the field path 'Arguments deskew min_angle'"""
DESKEW_NUM_PEAKS_DEFAULT = 20
"""Default value of the field path 'Arguments deskew num_peaks'"""
DESKEW_SIGMA_DEFAULT = 3.0
"""Default value of the field path 'Arguments deskew sigma'"""
DE_NOISE_LEVEL_DEFAULT = 220
"""Default value of the field path 'Auto mask de_noise_level'"""
DE_NOISE_MORPHOLOGY_DEFAULT = True
"""Default value of the field path 'Auto mask de_noise_morphology'"""
DE_NOISE_SIZE_DEFAULT = 1000
"""Default value of the field path 'Auto mask de_noise_size'"""
DITHER_DEFAULT = False
"""Default value of the field path 'Arguments dither'"""
DPI_DEFAULT = 300
"""Default value of the field path 'Arguments dpi'"""
INVERSE_MASK_DEFAULT = False
"""Default value of the field path 'Auto mask inverse_mask'"""
class IntermediateError(TypedDict, total=False):
"""Intermediate error."""
error: str
traceback: List[str]
JPEG_DEFAULT = False
"""Default value of the field path 'Arguments jpeg'"""
JPEG_QUALITY_DEFAULT = 90
"""Default value of the field path 'Arguments jpeg_quality'"""
LEVEL_DEFAULT = False
"""Default value of the field path 'Arguments level'"""
LINE_DETECTION_APERTURE_SIZE_DEFAULT = 3
"""Default value of the field path 'Line detection aperture_size'"""
LINE_DETECTION_HIGH_THRESHOLD_DEFAULT = 1000
"""Default value of the field path 'Line detection high_threshold'"""
LINE_DETECTION_LOW_THRESHOLD_DEFAULT = 0
"""Default value of the field path 'Line detection low_threshold'"""
LINE_DETECTION_MAX_LINE_GAP_DEFAULT = 100
"""Default value of the field path 'Line detection max_line_gap'"""
LINE_DETECTION_MIN_LINE_LENGTH_DEFAULT = 50
"""Default value of the field path 'Line detection min_line_length'"""
LINE_DETECTION_RHO_DEFAULT = 1
"""Default value of the field path 'Line detection rho'"""
LINE_DETECTION_THRESHOLD_DEFAULT = 100
"""Default value of the field path 'Line detection threshold'"""
LOWER_HSV_COLOR_DEFAULT = [0, 0, 250]
"""Default value of the field path 'Auto mask lower_hsv_color'"""
class Limit(TypedDict, total=False):
"""Limit."""
name: str
"""The name visible on the generated image"""
type: str
"""The kind of split"""
value: int
"""The split position"""
vertical: bool
"""Is vertical?"""
margin: int
"""The margin around the split, can be used to remove a fold"""
class LineDetection(TypedDict, total=False):
"""
Line detection.
The line detection used in assisted split
"""
low_threshold: int
"""
Line detection low threshold.
The low threshold used in the Canny edge detector
default: 0
"""
high_threshold: int
"""
Line detection high threshold.
The high threshold used in the Canny edge detector
default: 1000
"""
aperture_size: int
"""
Line detection aperture size.
The aperture size used in the Canny edge detector
default: 3
"""
rho: int
"""
Line detection rho.
The rho used in the Hough transform
default: 1
"""
threshold: int
"""
Line detection threshold.
The threshold used in the Hough transform
default: 100
"""
min_line_length: int
"""
Line detection min line length.
The minimum line length in percentage of the image size used in the Hough transform
default: 50
"""
max_line_gap: int
"""
Line detection max line gap.
The maximum line gap in percentage of the image size used in the Hough transform
default: 100
"""
MARGIN_HORIZONTAL_DEFAULT = 9
"""Default value of the field path 'Arguments margin_horizontal'"""
MARGIN_VERTICAL_DEFAULT = 6
"""Default value of the field path 'Arguments margin_vertical'"""
MAX_LEVEL_DEFAULT = 100
"""Default value of the field path 'Arguments max_level'"""
MIN_BOX_BLACK_CROP_DEFAULT = 2
"""Default value of the field path 'Arguments min_box_black_crop'"""
MIN_BOX_BLACK_EMPTY_DEFAULT = 2
"""Default value of the field path 'Arguments min_box_black_empty'"""
MIN_BOX_BLACK_LIMIT_DEFAULT = 2
"""Default value of the field path 'Arguments min_box_black_limit'"""
MIN_BOX_SIZE_CROP_DEFAULT = 3
"""Default value of the field path 'Arguments min_box_size_crop'"""
MIN_BOX_SIZE_EMPTY_DEFAULT = 10
"""Default value of the field path 'Arguments min_box_size_empty'"""
MIN_BOX_SIZE_LIMIT_DEFAULT = 10
"""Default value of the field path 'Arguments min_box_size_limit'"""
MIN_LEVEL_DEFAULT = 0
"""Default value of the field path 'Arguments min_level'"""
NO_AUTO_ROTATE_DEFAULT = False
"""Default value of the field path 'Arguments no_auto_rotate'"""
NO_CROP_DEFAULT = False
"""Default value of the field path 'Arguments no_crop'"""
PNGQUANT_OPTIONS_DEFAULT = ["--force", "--speed=1", "--strip", "--quality=0-32"]
"""Default value of the field path 'Arguments pngquant_options'"""
PROGRESS_DEFAULT = False
"""Default value of the field path 'Configuration progress'"""
RULE_ENABLE_DEFAULT = True
"""Default value of the field path 'Rule enable'"""
RULE_GRADUATION_COLOR_DEFAULT = [0, 0, 0]
"""Default value of the field path 'Rule graduation_color'"""
RULE_GRADUATION_TEXT_FONT_COLOR_DEFAULT = [0, 0, 0]
"""Default value of the field path 'Rule graduation_text_font_color'"""
RULE_GRADUATION_TEXT_FONT_FILENAME_DEFAULT = "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf"
"""Default value of the field path 'Rule graduation_text_font_filename'"""
RULE_GRADUATION_TEXT_FONT_SIZE_DEFAULT = 17
"""Default value of the field path 'Rule graduation_text_font_size'"""
RULE_GRADUATION_TEXT_MARGIN_DEFAULT = 6
"""Default value of the field path 'Rule graduation_text_margin'"""
RULE_LINES_COLOR_DEFAULT = [0, 0, 0]
"""Default value of the field path 'Rule lines_color'"""
RULE_LINES_OPACITY_DEFAULT = 0.2
"""Default value of the field path 'Rule lines_opacity'"""
RULE_LINES_SPACE_DEFAULT = 100
"""Default value of the field path 'Rule lines_space'"""
RULE_MAJOR_GRADUATION_SIZE_DEFAULT = 30
"""Default value of the field path 'Rule major_graduation_size'"""
RULE_MAJOR_GRADUATION_SPACE_DEFAULT = 100
"""Default value of the field path 'Rule major_graduation_space'"""
RULE_MINOR_GRADUATION_SIZE_DEFAULT = 10
"""Default value of the field path 'Rule minor_graduation_size'"""
RULE_MINOR_GRADUATION_SPACE_DEFAULT = 10
"""Default value of the field path 'Rule minor_graduation_space'"""
RUN_EXIFTOOL_DEFAULT = False
"""Default value of the field path 'Arguments run_exiftool'"""
RUN_OPTIPNG_DEFAULT = True
"""Default value of the field path 'Arguments run_optipng'"""
RUN_PNGQUANT_DEFAULT = False
"""Default value of the field path 'Arguments run_pngquant'"""
RUN_PS2PDF_DEFAULT = False
"""Default value of the field path 'Arguments run_ps2pdf'"""
class Rule(TypedDict, total=False):
"""
Rule.
Configuration of rule displayed in assisted split images
"""
enable: bool
"""
Rule enable.
default: True
"""
minor_graduation_space: int
"""
Rule minor graduation space.
default: 10
"""
major_graduation_space: int
"""
Rule major graduation space.
default: 100
"""
lines_space: int
"""
Rule lines space.
default: 100
"""
minor_graduation_size: int
"""
Rule minor graduation size.
default: 10
"""
major_graduation_size: int
"""
Rule major graduation size.
default: 30
"""
graduation_color: List[int]
"""
Rule graduation color.
default:
- 0
- 0
- 0
"""
lines_color: List[int]
"""
Rule lines color.
default:
- 0
- 0
- 0
"""
lines_opacity: Union[int, float]
"""
Rule lines opacity.
default: 0.2
"""
graduation_text_font_filename: str
"""
Rule graduation text font filename.
default: /usr/share/fonts/truetype/dejavu/DejaVuSans.ttf
"""
graduation_text_font_size: Union[int, float]
"""
Rule graduation text font size.
default: 17
"""
graduation_text_font_color: List[int]
"""
Rule graduation text font color.
default:
- 0
- 0
- 0
"""
graduation_text_margin: int
"""
Rule graduation text margin.
default: 6
"""
SHARPEN_DEFAULT = False
"""Default value of the field path 'Arguments sharpen'"""
class Step(TypedDict, total=False):
"""Step."""
name: str
"""The step name"""
sources: List[str]
"""The images obtain after the current step"""
process_count: int
"""The step number"""
TESSERACT_DEFAULT = True
"""Default value of the field path 'Arguments tesseract'"""
TESSERACT_LANG_DEFAULT = "fra+eng"
"""Default value of the field path 'Arguments tesseract_lang'"""
THRESHOLD_BLOCK_SIZE_CROP_DEFAULT = 1.5
"""Default value of the field path 'Arguments threshold_block_size_crop'"""
THRESHOLD_BLOCK_SIZE_EMPTY_DEFAULT = 1.5
"""Default value of the field path 'Arguments threshold_block_size_empty'"""
THRESHOLD_BLOCK_SIZE_LIMIT_DEFAULT = 1.5
"""Default value of the field path 'Arguments threshold_block_size_limit'"""
THRESHOLD_VALUE_C_CROP_DEFAULT = 70
"""Default value of the field path 'Arguments threshold_value_c_crop'"""
THRESHOLD_VALUE_C_EMPTY_DEFAULT = 70
"""Default value of the field path 'Arguments threshold_value_c_empty'"""
THRESHOLD_VALUE_C_LIMIT_DEFAULT = 70
"""Default value of the field path 'Arguments threshold_value_c_limit'"""
UPPER_HSV_COLOR_DEFAULT = [255, 10, 255]
"""Default value of the field path 'Auto mask upper_hsv_color'"""
class _ArgumentsDeskew(TypedDict, total=False):
"""The deskew configuration"""
min_angle: Union[int, float]
"""
Deskew min angle.
The minimum angle to detect the image skew [degree]
default: -45
"""
max_angle: Union[int, float]
"""
Deskew max angle.
The maximum angle to detect the image skew [degree]
default: 45
"""
angle_derivation: Union[int, float]
"""
Deskew angle derivation.
The step of angle to detect the image skew [degree]
default: 0.1
"""
sigma: Union[int, float]
"""
Deskew sigma.
Used in the `canny` function
default: 3.0
"""
num_peaks: int
"""
Deskew num peaks.
number of peaks we ask for
default: 20
"""
angle_pm_90: bool
"""
Deskew angle pm 90.
Detect an angle of +/- 90 degree, also +/- 45 degree
default: False
"""
class _ConfigurationImagesConfigAdditionalproperties(TypedDict, total=False):
angle: Union[Union[int, float], None]
"""The used angle to deskew, can be change, restart by deleting one of the generated images"""
status: "_ConfigurationImagesConfigAdditionalpropertiesStatus"
"""
WARNING: The required are not correctly taken in account,
See: https://github.com/camptocamp/jsonschema-gentypes/issues/6
"""
class _ConfigurationImagesConfigAdditionalpropertiesStatus(TypedDict, total=False):
angle: Union[int, float]
"""The measured deskew angle"""
size: List[Union[int, float]]
"""The image dimensions"""
|
/scan_to_paperless-1.25.0-py3-none-any.whl/scan_to_paperless/process_schema.py
| 0.927079 | 0.680641 |
process_schema.py
|
pypi
|
import argparse
import glob
import json
import logging
import math
import os
import re
import shutil
import subprocess # nosec
import sys
import tempfile
import time
import traceback
from typing import IO, TYPE_CHECKING, Any, Dict, List, Optional, Protocol, Tuple, TypedDict, Union, cast
# read, write, rotate, crop, sharpen, draw_line, find_line, find_contour
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pikepdf
from deskew import determine_skew_debug_images
from PIL import Image, ImageDraw, ImageFont
from ruamel.yaml.main import YAML
from skimage.color import rgb2gray, rgba2rgb
from skimage.exposure import histogram as skimage_histogram
from skimage.metrics import structural_similarity
from skimage.util import img_as_ubyte
from scan_to_paperless import code
from scan_to_paperless import process_schema as schema
if TYPE_CHECKING:
NpNdarrayInt = np.ndarray[np.uint8, Any]
CompletedProcess = subprocess.CompletedProcess[str]
else:
NpNdarrayInt = np.ndarray
CompletedProcess = subprocess.CompletedProcess
# dither, crop, append, repage
CONVERT = ["gm", "convert"]
_LOG = logging.getLogger(__name__)
class ScanToPaperlessException(Exception):
"""Base exception for this module."""
def rotate_image(
image: NpNdarrayInt, angle: float, background: Union[int, Tuple[int, int, int]]
) -> NpNdarrayInt:
"""Rotate the image."""
old_width, old_height = image.shape[:2]
angle_radian = math.radians(angle)
width = abs(np.sin(angle_radian) * old_height) + abs(np.cos(angle_radian) * old_width)
height = abs(np.sin(angle_radian) * old_width) + abs(np.cos(angle_radian) * old_height)
image_center: Tuple[Any, ...] = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
rot_mat[1, 2] += (width - old_width) / 2
rot_mat[0, 2] += (height - old_height) / 2
return cast(
NpNdarrayInt,
cv2.warpAffine(image, rot_mat, (int(round(height)), int(round(width))), borderValue=background),
)
def crop_image( # pylint: disable=too-many-arguments
image: NpNdarrayInt,
x: int,
y: int,
width: int,
height: int,
background: Union[Tuple[int], Tuple[int, int, int]],
) -> NpNdarrayInt:
"""Crop the image."""
matrice: NpNdarrayInt = np.array([[1.0, 0.0, -x], [0.0, 1.0, -y]])
return cast(
NpNdarrayInt,
cv2.warpAffine(image, matrice, (int(round(width)), int(round(height))), borderValue=background),
)
class Context: # pylint: disable=too-many-instance-attributes
"""All the context of the current image with his mask."""
def __init__( # pylint: disable=too-many-arguments
self,
config: schema.Configuration,
step: schema.Step,
config_file_name: Optional[str] = None,
root_folder: Optional[str] = None,
image_name: Optional[str] = None,
) -> None:
"""Initialize."""
self.config = config
self.step = step
self.config_file_name = config_file_name
self.root_folder = root_folder
self.image_name = image_name
self.image: Optional[NpNdarrayInt] = None
self.mask: Optional[NpNdarrayInt] = None
self.process_count = self.step.get("process_count", 0)
def _get_mask(
self,
auto_mask_config: Optional[schema.AutoMask],
config_section: str,
default_file_name: str,
) -> Optional[NpNdarrayInt]:
"""Init the mask."""
if auto_mask_config is not None:
hsv = cv2.cvtColor(self.image, cv2.COLOR_BGR2HSV)
lower_val = np.array(
auto_mask_config.setdefault("lower_hsv_color", schema.LOWER_HSV_COLOR_DEFAULT)
)
upper_val = np.array(
auto_mask_config.setdefault("upper_hsv_color", schema.UPPER_HSV_COLOR_DEFAULT)
)
mask = cv2.inRange(hsv, lower_val, upper_val)
de_noise_size = auto_mask_config.setdefault("de_noise_size", schema.DE_NOISE_SIZE_DEFAULT)
mask = cv2.copyMakeBorder(
mask,
de_noise_size,
de_noise_size,
de_noise_size,
de_noise_size,
cv2.BORDER_REPLICATE,
)
if auto_mask_config.get("de_noise_morphology", True):
mask = cv2.morphologyEx(
mask,
cv2.MORPH_CLOSE,
cv2.getStructuringElement(cv2.MORPH_RECT, (de_noise_size, de_noise_size)),
)
else:
blur = cv2.blur(
mask,
(de_noise_size, de_noise_size),
)
_, mask = cv2.threshold(
blur,
auto_mask_config.setdefault("de_noise_level", schema.DE_NOISE_LEVEL_DEFAULT),
255,
cv2.THRESH_BINARY,
)
inverse_mask = auto_mask_config.get("inverse_mask", False)
if not inverse_mask:
mask = cv2.bitwise_not(mask)
buffer_size = auto_mask_config.setdefault("buffer_size", schema.BUFFER_SIZE_DEFAULT)
blur = cv2.blur(mask, (buffer_size, buffer_size))
_, mask = cv2.threshold(
blur,
auto_mask_config.setdefault("buffer_level", schema.BUFFER_LEVEL_DEFAULT),
255,
cv2.THRESH_BINARY,
)
mask = mask[de_noise_size:-de_noise_size, de_noise_size:-de_noise_size]
if self.root_folder:
mask_file: Optional[str] = os.path.join(self.root_folder, default_file_name)
assert mask_file
if not os.path.exists(mask_file):
base_folder = os.path.dirname(self.root_folder)
assert base_folder
mask_file = os.path.join(base_folder, default_file_name)
if not os.path.exists(mask_file):
mask_file = None
mask_file = (
auto_mask_config.setdefault("additional_filename", mask_file)
if mask_file
else auto_mask_config.get("additional_filename")
)
if mask_file and os.path.exists(mask_file):
mask = cv2.add(
mask,
cv2.bitwise_not(
cv2.resize(
cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE),
(mask.shape[1], mask.shape[0]),
)
),
)
final_mask = cv2.bitwise_not(mask)
if os.environ.get("PROGRESS", "FALSE") == "TRUE" and self.root_folder:
self.save_progress_images(config_section.replace("_", "-"), final_mask)
elif self.root_folder:
mask_file = os.path.join(self.root_folder, default_file_name)
if not os.path.exists(mask_file):
base_folder = os.path.dirname(self.root_folder)
assert base_folder
mask_file = os.path.join(base_folder, default_file_name)
if not os.path.exists(mask_file):
return None
final_mask = cv2.imread(mask_file, cv2.IMREAD_GRAYSCALE)
if self.image is not None and final_mask is not None:
return cast(NpNdarrayInt, cv2.resize(final_mask, (self.image.shape[1], self.image.shape[0])))
return cast(NpNdarrayInt, final_mask)
def init_mask(self) -> None:
"""Init the mask image used to mask the image on the crop and skew calculation."""
self.mask = self._get_mask(self.config["args"].get("auto_mask"), "auto_mask", "mask.png")
def get_background_color(self) -> Tuple[int, int, int]:
"""Get the background color."""
return cast(
Tuple[int, int, int],
self.config["args"].setdefault("background_color", schema.BACKGROUND_COLOR_DEFAULT),
)
def do_initial_cut(self) -> None:
"""Definitively mask the original image."""
if "auto_cut" in self.config["args"]:
assert self.image is not None
mask = self._get_mask(self.config["args"].get("auto_cut"), "auto_cut", "cut.png")
self.image[mask == 0] = self.get_background_color()
def get_process_count(self) -> int:
"""Get the step number."""
try:
return self.process_count
finally:
self.process_count += 1
def get_masked(self) -> NpNdarrayInt:
"""Get the mask."""
if self.image is None:
raise ScanToPaperlessException("The image is None")
if self.mask is None:
return self.image.copy()
image = self.image.copy()
image[self.mask == 0] = self.get_background_color()
return image
def crop(self, x: int, y: int, width: int, height: int) -> None:
"""Crop the image."""
if self.image is None:
raise ScanToPaperlessException("The image is None")
self.image = crop_image(self.image, x, y, width, height, self.get_background_color())
if self.mask is not None:
self.mask = crop_image(self.mask, x, y, width, height, (0,))
def rotate(self, angle: float) -> None:
"""Rotate the image."""
if self.image is None:
raise ScanToPaperlessException("The image is None")
self.image = rotate_image(self.image, angle, self.get_background_color())
if self.mask is not None:
self.mask = rotate_image(self.mask, angle, 0)
def get_px_value(self, name: str, default: Union[int, float]) -> float:
"""Get the value in px."""
return (
cast(float, cast(Dict[str, Any], self.config["args"]).setdefault(name, default))
/ 10
/ 2.51
* self.config["args"].setdefault("dpi", schema.DPI_DEFAULT)
)
def is_progress(self) -> bool:
"""Return we want to have the intermediate files."""
return os.environ.get("PROGRESS", "FALSE") == "TRUE" or self.config.setdefault(
"progress", schema.PROGRESS_DEFAULT
)
def save_progress_images(
self,
name: str,
image: Optional[NpNdarrayInt] = None,
image_prefix: str = "",
process_count: Optional[int] = None,
force: bool = False,
) -> Optional[str]:
"""Save the intermediate images."""
if process_count is None:
process_count = self.get_process_count()
if (self.is_progress() or force) and self.image_name is not None and self.root_folder is not None:
name = f"{process_count}-{name}" if self.is_progress() else name
dest_folder = os.path.join(self.root_folder, name)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
dest_image = os.path.join(dest_folder, image_prefix + self.image_name)
if image is not None:
try:
cv2.imwrite(dest_image, image)
return dest_image
except Exception as exception:
print(exception)
else:
try:
cv2.imwrite(dest_image, self.image)
except Exception as exception:
print(exception)
dest_image = os.path.join(dest_folder, "mask-" + self.image_name)
try:
dest_image = os.path.join(dest_folder, "masked-" + self.image_name)
except Exception as exception:
print(exception)
try:
cv2.imwrite(dest_image, self.get_masked())
except Exception as exception:
print(exception)
return None
def add_intermediate_error(
config: schema.Configuration,
config_file_name: Optional[str],
error: Exception,
traceback_: List[str],
) -> None:
"""Add in the config non fatal error."""
if config_file_name is None:
raise ScanToPaperlessException("The config file name is required") from error
if "intermediate_error" not in config:
config["intermediate_error"] = []
old_intermediate_error: List[schema.IntermediateError] = []
old_intermediate_error.extend(config["intermediate_error"])
yaml = YAML()
yaml.default_flow_style = False
try:
config["intermediate_error"].append({"error": str(error), "traceback": traceback_})
with open(config_file_name + "_", "w", encoding="utf-8") as config_file:
yaml.dump(config, config_file)
except Exception as exception:
print(exception)
config["intermediate_error"] = old_intermediate_error
config["intermediate_error"].append({"error": str(error), "traceback": traceback_})
with open(config_file_name + "_", "w", encoding="utf-8") as config_file:
yaml.dump(config, config_file)
os.rename(config_file_name + "_", config_file_name)
def call(cmd: Union[str, List[str]], **kwargs: Any) -> None:
"""Verbose version of check_output with no returns."""
if isinstance(cmd, list):
cmd = [str(element) for element in cmd]
print(" ".join(cmd) if isinstance(cmd, list) else cmd)
sys.stdout.flush()
kwargs.setdefault("check", True)
subprocess.run( # nosec # pylint: disable=subprocess-run-check
cmd,
capture_output=True,
**kwargs,
)
def run(cmd: Union[str, List[str]], **kwargs: Any) -> CompletedProcess:
"""Verbose version of check_output with no returns."""
if isinstance(cmd, list):
cmd = [str(element) for element in cmd]
print(" ".join(cmd) if isinstance(cmd, list) else cmd)
sys.stdout.flush()
return subprocess.run(cmd, stderr=subprocess.PIPE, check=True, **kwargs) # nosec
def output(cmd: Union[str, List[str]], **kwargs: Any) -> str:
"""Verbose version of check_output."""
if isinstance(cmd, list):
cmd = [str(element) for element in cmd]
print(" ".join(cmd) if isinstance(cmd, list) else cmd)
sys.stdout.flush()
return cast(bytes, subprocess.check_output(cmd, stderr=subprocess.PIPE, **kwargs)).decode() # nosec
def image_diff(image1: NpNdarrayInt, image2: NpNdarrayInt) -> Tuple[float, NpNdarrayInt]:
"""Do a diff between images."""
width = max(image1.shape[1], image2.shape[1])
height = max(image1.shape[0], image2.shape[0])
image1 = cv2.resize(image1, (width, height))
image2 = cv2.resize(image2, (width, height))
image1 = image1 if len(image1.shape) == 2 else cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
image2 = image2 if len(image2.shape) == 2 else cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
score, diff = structural_similarity(image1, image2, full=True)
diff = (255 - diff * 255).astype("uint8")
return score, diff
class FunctionWithContextReturnsImage(Protocol):
"""Function with context and returns an image."""
def __call__(self, context: Context) -> Optional[NpNdarrayInt]:
"""Call the function."""
class FunctionWithContextReturnsNone(Protocol):
"""Function with context and no return."""
def __call__(self, context: Context) -> None:
"""Call the function."""
class ExternalFunction(Protocol):
"""Function that call an external tool."""
def __call__(self, context: Context, source: str, destination: str) -> None:
"""Call the function."""
# Decorate a step of the transform
class Process: # pylint: disable=too-few-public-methods
"""
Encapsulate a transform function.
To save the process image when needed.
"""
def __init__(self, name: str, ignore_error: bool = False, progress: bool = True) -> None:
"""Initialize."""
self.name = name
self.ignore_error = ignore_error
self.progress = progress
def __call__(self, func: FunctionWithContextReturnsImage) -> FunctionWithContextReturnsNone:
"""Call the function."""
def wrapper(context: Context) -> None:
start_time = time.perf_counter()
if self.ignore_error:
try:
new_image = func(context)
if new_image is not None and self.ignore_error:
context.image = new_image
except Exception as exception:
print(exception)
add_intermediate_error(
context.config,
context.config_file_name,
exception,
traceback.format_exc().split("\n"),
)
else:
new_image = func(context)
if new_image is not None:
context.image = new_image
elapsed_time = time.perf_counter() - start_time
if os.environ.get("TIME", "FALSE") == "TRUE":
print(f"Elapsed time in {self.name}: {int(round(elapsed_time))}s.")
if self.progress:
context.save_progress_images(self.name)
return wrapper
def external(func: ExternalFunction) -> FunctionWithContextReturnsImage:
"""Run an external tool."""
def wrapper(context: Context) -> Optional[NpNdarrayInt]:
with tempfile.NamedTemporaryFile(suffix=".png") as source:
cv2.imwrite(source.name, context.image)
with tempfile.NamedTemporaryFile(suffix=".png") as destination:
func(context, source.name, destination.name)
return cast(NpNdarrayInt, cv2.imread(destination.name))
return wrapper
def get_contour_to_crop(
contours: List[Tuple[int, int, int, int]], margin_horizontal: int = 0, margin_vertical: int = 0
) -> Tuple[int, int, int, int]:
"""Get the contour to crop."""
content = [
contours[0][0],
contours[0][1],
contours[0][0] + contours[0][2],
contours[0][1] + contours[0][3],
]
for contour in contours:
content[0] = min(content[0], contour[0])
content[1] = min(content[1], contour[1])
content[2] = max(content[2], contour[0] + contour[2])
content[3] = max(content[3], contour[1] + contour[3])
return (
content[0] - margin_horizontal,
content[1] - margin_vertical,
content[2] - content[0] + 2 * margin_horizontal,
content[3] - content[1] + 2 * margin_vertical,
)
def crop(context: Context, margin_horizontal: int, margin_vertical: int) -> None:
"""
Do a crop on an image.
Margin in px
"""
image = context.get_masked()
process_count = context.get_process_count()
contours = find_contours(image, context, process_count, "crop", "crop", schema.MIN_BOX_SIZE_CROP_DEFAULT)
if contours:
for contour in contours:
draw_rectangle(image, contour)
context.save_progress_images("crop", image, process_count=process_count, force=True)
x, y, width, height = get_contour_to_crop(contours, margin_horizontal, margin_vertical)
context.crop(x, y, width, height)
def _get_level(context: Context) -> Tuple[bool, float, float]:
level_ = context.config["args"].setdefault("level", schema.LEVEL_DEFAULT)
min_p100 = 0.0
max_p100 = 100.0
if level_ is True:
min_p100 = schema.MIN_LEVEL_DEFAULT
max_p100 = schema.MAX_LEVEL_DEFAULT
elif isinstance(level_, (float, int)):
min_p100 = 0.0 + level_
max_p100 = 100.0 - level_
if level_ is not False:
min_p100 = context.config["args"].setdefault("min_level", min_p100)
max_p100 = context.config["args"].setdefault("max_level", max_p100)
min_ = min_p100 / 100.0 * 255.0
max_ = max_p100 / 100.0 * 255.0
return level_ is not False, min_, max_
def _histogram(
context: Context,
histogram_data: Any,
histogram_centers: Any,
histogram_max: Any,
process_count: int,
log: bool,
) -> None:
_, axes = plt.subplots(figsize=(15, 5))
axes.set_xlim(0, 255)
if log:
axes.semilogy(histogram_centers, histogram_data, lw=1)
else:
axes.plot(histogram_centers, histogram_data, lw=1)
axes.set_title("Gray-level histogram")
points = []
level_, min_, max_ = _get_level(context)
if level_ and min_ > 0:
points.append(("min_level", min_, histogram_max / 5))
cut_white = (
context.config["args"].setdefault("cut_white", schema.CUT_WHITE_DEFAULT) / 255 * (max_ - min_) + min_
)
cut_black = (
context.config["args"].setdefault("cut_black", schema.CUT_BLACK_DEFAULT) / 255 * (max_ - min_) + min_
)
if cut_black > 0.0:
points.append(("cut_black", cut_black, histogram_max / 10))
if cut_white < 255.0:
points.append(("cut_white", cut_white, histogram_max / 5))
if level_ and max_ < 255.0:
points.append(("max_level", max_, histogram_max / 10))
for label, value, pos in points:
if int(round(value)) < len(histogram_data):
hist_value = histogram_data[int(round(value))]
axes.annotate(
label,
xy=(value, hist_value),
xycoords="data",
xytext=(value, hist_value + pos),
textcoords="data",
arrowprops={"facecolor": "black", "width": 1},
)
plt.tight_layout()
with tempfile.NamedTemporaryFile(suffix=".png") as file:
plt.savefig(file.name)
subprocess.run(["gm", "convert", "-flatten", file.name, file.name], check=True) # nosec
image = cv2.imread(file.name)
context.save_progress_images(
"histogram", image, image_prefix="log-" if log else "", process_count=process_count, force=True
)
@Process("histogram", progress=False)
def histogram(context: Context) -> None:
"""Create an image with the histogram of the current image."""
noisy_image = img_as_ubyte(context.image)
histogram_data, histogram_centers = skimage_histogram(noisy_image)
histogram_max = max(histogram_data)
process_count = context.get_process_count()
_histogram(context, histogram_data, histogram_centers, histogram_max, process_count, False)
_histogram(context, histogram_data, histogram_centers, histogram_max, process_count, True)
@Process("level")
def level(context: Context) -> NpNdarrayInt:
"""Do the level on an image."""
img_yuv = cv2.cvtColor(context.image, cv2.COLOR_BGR2YUV)
if context.config["args"].setdefault("auto_level", schema.AUTO_LEVEL_DEFAULT):
img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
return cast(NpNdarrayInt, cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR))
_, min_, max_ = _get_level(context)
chanel_y = img_yuv[:, :, 0]
mins = np.zeros(chanel_y.shape)
maxs: NpNdarrayInt = np.zeros(chanel_y.shape) + 255
values = (chanel_y - min_) / (max_ - min_) * 255
img_yuv[:, :, 0] = np.minimum(maxs, np.maximum(mins, values))
return cast(NpNdarrayInt, cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR))
@Process("color-cut")
def color_cut(context: Context) -> None:
"""Set the near white to white and near black to black."""
assert context.image is not None
grayscale = cv2.cvtColor(context.image, cv2.COLOR_BGR2GRAY)
white_mask = cv2.inRange(
grayscale, context.config["args"].setdefault("cut_white", schema.CUT_WHITE_DEFAULT), 255
)
black_mask = cv2.inRange(
grayscale, 0, context.config["args"].setdefault("cut_black", schema.CUT_BLACK_DEFAULT)
)
context.image[white_mask == 255] = (255, 255, 255)
context.image[black_mask == 255] = (0, 0, 0)
@Process("mask-cut")
def cut(context: Context) -> None:
"""Mask the image with the cut mask."""
context.do_initial_cut()
@Process("deskew")
def deskew(context: Context) -> None:
"""Deskew an image."""
images_config = context.config.setdefault("images_config", {})
assert context.image_name
image_config = images_config.setdefault(context.image_name, {})
image_status = image_config.setdefault("status", {})
angle = image_config.setdefault("angle", None)
if angle is None:
image = context.get_masked()
image_rgb = rgba2rgb(image) if len(image.shape) == 3 and image.shape[2] == 4 else image
grayscale = rgb2gray(image_rgb) if len(image_rgb.shape) == 3 else image_rgb
deskew_configuration = context.config["args"].setdefault("deskew", {})
skew_angle, debug_images = determine_skew_debug_images(
grayscale,
min_angle=deskew_configuration.setdefault("min_angle", schema.DESKEW_MIN_ANGLE_DEFAULT),
max_angle=deskew_configuration.setdefault("max_angle", schema.DESKEW_MAX_ANGLE_DEFAULT),
min_deviation=deskew_configuration.setdefault(
"angle_derivation", schema.DESKEW_ANGLE_DERIVATION_DEFAULT
),
sigma=deskew_configuration.setdefault("sigma", schema.DESKEW_SIGMA_DEFAULT),
num_peaks=deskew_configuration.setdefault("num_peaks", schema.DESKEW_NUM_PEAKS_DEFAULT),
angle_pm_90=deskew_configuration.setdefault("angle_pm_90", schema.DESKEW_ANGLE_PM_90_DEFAULT),
)
if skew_angle is not None:
image_status["angle"] = float(skew_angle)
angle = float(skew_angle)
assert context.root_folder
process_count = context.get_process_count()
for name, image in debug_images:
context.save_progress_images("skew", image, name, process_count, True)
if angle:
context.rotate(angle)
@Process("docrop")
def docrop(context: Context) -> None:
"""Crop an image."""
# Margin in mm
if context.config["args"].setdefault("no_crop", schema.NO_CROP_DEFAULT):
return
margin_horizontal = context.get_px_value("margin_horizontal", schema.MARGIN_HORIZONTAL_DEFAULT)
margin_vertical = context.get_px_value("margin_vertical", schema.MARGIN_VERTICAL_DEFAULT)
crop(context, int(round(margin_horizontal)), int(round(margin_vertical)))
@Process("sharpen")
def sharpen(context: Context) -> Optional[NpNdarrayInt]:
"""Sharpen an image."""
if context.config["args"].setdefault("sharpen", schema.SHARPEN_DEFAULT) is False:
return None
if context.image is None:
raise ScanToPaperlessException("The image is required")
image = cv2.GaussianBlur(context.image, (0, 0), 3)
return cast(NpNdarrayInt, cv2.addWeighted(context.image, 1.5, image, -0.5, 0))
@Process("dither")
@external
def dither(context: Context, source: str, destination: str) -> None:
"""Dither an image."""
if context.config["args"].setdefault("dither", schema.DITHER_DEFAULT) is False:
return
call(CONVERT + ["+dither", source, destination])
@Process("autorotate", True)
def autorotate(context: Context) -> None:
"""
Auto rotate an image.
Put the text in the right position.
"""
if context.config["args"].setdefault("no_auto_rotate", schema.NO_AUTO_ROTATE_DEFAULT):
return
with tempfile.NamedTemporaryFile(suffix=".png") as source:
cv2.imwrite(source.name, context.get_masked())
try:
orientation_lst = output(["tesseract", source.name, "-", "--psm", "0", "-l", "osd"]).splitlines()
orientation_lst = [e for e in orientation_lst if "Orientation in degrees" in e]
context.rotate(int(orientation_lst[0].split()[3]))
except subprocess.CalledProcessError:
print("Not text found")
def draw_line( # pylint: disable=too-many-arguments
image: NpNdarrayInt,
vertical: bool,
position: Optional[float],
value: Optional[int],
name: str,
type_: str,
color: Tuple[int, int, int],
line: Optional[Tuple[int, int, int, int]] = None,
) -> schema.Limit:
"""Draw a line on an image."""
img_len = image.shape[0 if vertical else 1]
if line is None:
assert position is not None
assert value is not None
if vertical:
cv2.rectangle(
image, (int(position) - 1, img_len), (int(position) + 1, img_len - value), color, -1
)
cv2.putText(
image, name, (int(position), img_len - value), cv2.FONT_HERSHEY_SIMPLEX, 2.0, color, 4
)
else:
cv2.rectangle(image, (0, int(position) - 1), (value, int(position) + 1), color, -1)
cv2.putText(image, name, (value, int(position)), cv2.FONT_HERSHEY_SIMPLEX, 2.0, color, 4)
else:
position = line[0] if vertical else line[1]
cv2.rectangle(
image,
(line[0] - (1 if vertical else 0), line[1] - (0 if vertical else 1)),
(line[2] + (1 if vertical else 0), line[3] + (0 if vertical else 1)),
color,
-1,
)
cv2.putText(image, name, (line[0], line[3]), cv2.FONT_HERSHEY_SIMPLEX, 2.0, color, 4)
assert position is not None
return {"name": name, "type": type_, "value": int(position), "vertical": vertical, "margin": 0}
def draw_rectangle(image: NpNdarrayInt, contour: Tuple[int, int, int, int], border: bool = True) -> None:
"""Draw a rectangle on an image."""
color = (0, 255, 0)
opacity = 0.1
x, y, width, height = contour
x = int(round(x))
y = int(round(y))
width = int(round(width))
height = int(round(height))
sub_img = image[y : y + height, x : x + width]
mask_image = np.zeros(sub_img.shape, dtype=np.uint8)
mask_image[:, :] = color
opacity_result = cv2.addWeighted(sub_img, 1 - opacity, mask_image, opacity, 1.0)
if opacity_result is not None:
image[y : y + height, x : x + width] = opacity_result
if border:
cv2.rectangle(image, (x, y), (x + 1, y + height), color, -1)
cv2.rectangle(image, (x, y), (x + width, y + 1), color, -1)
cv2.rectangle(image, (x, y + height - 1), (x + width, y + height), color, -1)
cv2.rectangle(image, (x + width - 1, y), (x + width, y + height), color, -1)
def find_lines(
image: NpNdarrayInt, vertical: bool, config: schema.LineDetection
) -> List[Tuple[int, int, int, int]]:
"""Find the lines on an image."""
edges = cv2.Canny(
image,
config.setdefault("high_threshold", schema.LINE_DETECTION_HIGH_THRESHOLD_DEFAULT),
config.setdefault("low_threshold", schema.LINE_DETECTION_LOW_THRESHOLD_DEFAULT),
apertureSize=config.setdefault("aperture_size", schema.LINE_DETECTION_APERTURE_SIZE_DEFAULT),
)
lines = cv2.HoughLinesP(
image=edges,
rho=config.setdefault("rho", schema.LINE_DETECTION_RHO_DEFAULT),
theta=np.pi / 2,
threshold=config.setdefault("threshold", schema.LINE_DETECTION_THRESHOLD_DEFAULT),
minLineLength=(image.shape[0] if vertical else image.shape[1])
/ 100
* config.setdefault("min_line_length", schema.LINE_DETECTION_MIN_LINE_LENGTH_DEFAULT),
maxLineGap=config.setdefault("max_line_gap", schema.LINE_DETECTION_MAX_LINE_GAP_DEFAULT),
)
if lines is None:
return []
lines = [line for line, in lines if (line[0] == line[2] if vertical else line[1] == line[3])]
def _key(line: Tuple[int, int, int, int]) -> int:
return line[1] - line[3] if vertical else line[2] - line[0]
return cast(List[Tuple[int, int, int, int]], sorted(lines, key=_key)[:5])
def zero_ranges(values: NpNdarrayInt) -> NpNdarrayInt:
"""Create an array that is 1 where a is 0, and pad each end with an extra 0."""
iszero: NpNdarrayInt = np.concatenate([[0], np.equal(values, 0).view(np.int8), [0]])
absdiff = np.abs(np.diff(iszero))
# Runs start and end where absdiff is 1.
ranges = np.where(absdiff == 1)[0].reshape(-1, 2)
return cast(NpNdarrayInt, ranges)
def find_limit_contour(
image: NpNdarrayInt, vertical: bool, contours: List[Tuple[int, int, int, int]]
) -> List[int]:
"""Find the contour for assisted split."""
image_size = image.shape[1 if vertical else 0]
values = np.zeros(image_size)
if vertical:
for x, _, width, height in contours:
x_int = int(round(x))
for value in range(x_int, min(x_int + width, image_size)):
values[value] += height
else:
for _, y, width, height in contours:
y_int = int(round(y))
for value in range(y_int, min(y_int + height, image_size)):
values[value] += width
ranges = zero_ranges(values)
result: List[int] = []
for ranges_ in ranges:
if ranges_[0] != 0 and ranges_[1] != image_size:
result.append(int(round(sum(ranges_) / 2)))
return result
def find_limits(
image: NpNdarrayInt, vertical: bool, context: Context, contours: List[Tuple[int, int, int, int]]
) -> Tuple[List[int], List[Tuple[int, int, int, int]]]:
"""Find the limit for assisted split."""
contours_limits = find_limit_contour(image, vertical, contours)
lines = find_lines(image, vertical, context.config["args"].setdefault("line_detection", {}))
return contours_limits, lines
def fill_limits(
image: NpNdarrayInt, vertical: bool, contours_limits: List[int], lines: List[Tuple[int, int, int, int]]
) -> List[schema.Limit]:
"""Fill the limit for assisted split."""
third_image_size = int(image.shape[0 if vertical else 1] / 3)
limits: List[schema.Limit] = []
prefix = "V" if vertical else "H"
for index, line in enumerate(lines):
limits.append(
draw_line(image, vertical, None, None, f"{prefix}L{index}", "line detection", (255, 0, 0), line)
)
for index, contour in enumerate(contours_limits):
limits.append(
draw_line(
image,
vertical,
contour,
third_image_size,
f"{prefix}C{index}",
"contour detection",
(0, 255, 0),
)
)
if not limits:
half_image_size = image.shape[1 if vertical else 0] / 2
limits.append(
draw_line(
image, vertical, half_image_size, third_image_size, f"{prefix}C", "image center", (0, 0, 255)
)
)
return limits
def find_contours(
image: NpNdarrayInt,
context: Context,
progress_count: int,
name: str,
prefix: str,
default_min_box_size: int = schema.MIN_BOX_SIZE_EMPTY_DEFAULT,
) -> List[Tuple[int, int, int, int]]:
"""Find the contours on an image."""
block_size = context.get_px_value(
f"threshold_block_size_{prefix}", schema.THRESHOLD_BLOCK_SIZE_CROP_DEFAULT
)
threshold_value_c = cast(Dict[str, int], context.config["args"]).setdefault(
f"threshold_value_c_{prefix}", schema.THRESHOLD_VALUE_C_CROP_DEFAULT
)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
block_size = int(round(block_size / 2) * 2)
# Clean the image using otsu method with the inversed binarized image
thresh = cv2.adaptiveThreshold(
gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, block_size + 1, threshold_value_c
)
if context.is_progress() and context.root_folder and context.image_name:
context.save_progress_images("threshold", thresh)
block_size_list = (block_size, 1.5, 5, 10, 15, 20, 50, 100, 200)
threshold_value_c_list = (threshold_value_c, 20, 50, 100)
for block_size2 in block_size_list:
for threshold_value_c2 in threshold_value_c_list:
block_size2 = int(round(block_size2 / 2) * 2)
thresh2 = cv2.adaptiveThreshold(
gray,
255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY_INV,
block_size2 + 1,
threshold_value_c2,
)
contours = _find_contours_thresh(image, thresh2, context, prefix, default_min_box_size)
thresh2 = cv2.cvtColor(thresh2, cv2.COLOR_GRAY2BGR)
if contours:
for contour in contours:
draw_rectangle(thresh2, contour)
context.save_progress_images(
f"{name}-threshold",
thresh2,
f"block_size_{prefix}-{block_size2}-value_c_{prefix}-{threshold_value_c2}-",
progress_count,
)
return _find_contours_thresh(image, thresh, context, prefix, default_min_box_size)
def _find_contours_thresh(
image: NpNdarrayInt, thresh: NpNdarrayInt, context: Context, prefix: str, default_min_box_size: int = 10
) -> List[Tuple[int, int, int, int]]:
min_size = context.get_px_value(f"min_box_size_{prefix}", default_min_box_size)
min_black = cast(Dict[str, int], context.config["args"]).setdefault(
f"min_box_black_{prefix}", schema.MIN_BOX_BLACK_CROP_DEFAULT
)
kernel_size = context.get_px_value(
f"contour_kernel_size_{prefix}", schema.CONTOUR_KERNEL_SIZE_CROP_DEFAULT
)
kernel_size = int(round(kernel_size / 2))
# Assign a rectangle kernel size
kernel: NpNdarrayInt = np.ones((kernel_size, kernel_size), "uint8")
par_img = cv2.dilate(thresh, kernel, iterations=5)
contours, _ = cv2.findContours(par_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
result = []
for cnt in contours:
x, y, width, height = cv2.boundingRect(cnt)
if width > min_size and height > min_size:
contour_image = crop_image(image, x, y, width, height, context.get_background_color())
imagergb = (
rgba2rgb(contour_image)
if len(contour_image.shape) == 3 and contour_image.shape[2] == 4
else contour_image
)
contour_image = rgb2gray(imagergb) if len(imagergb.shape) == 3 else imagergb
if (1 - np.mean(contour_image)) * 100 > min_black:
result.append(
(
x + kernel_size * 2,
y + kernel_size * 2,
width - kernel_size * 4,
height - kernel_size * 4,
)
)
return result
def transform(
config: schema.Configuration,
step: schema.Step,
config_file_name: str,
root_folder: str,
) -> schema.Step:
"""Apply the transforms on a document."""
if "intermediate_error" in config:
del config["intermediate_error"]
images = []
process_count = 0
if config["args"].setdefault("assisted_split", schema.ASSISTED_SPLIT_DEFAULT):
config["assisted_split"] = []
for index, image in enumerate(step["sources"]):
image_name = f"{os.path.basename(image).rsplit('.')[0]}.png"
context = Context(config, step, config_file_name, root_folder, image_name)
if context.image_name is None:
raise ScanToPaperlessException("Image name is required")
context.image = cv2.imread(os.path.join(root_folder, image))
images_config = context.config.setdefault("images_config", {})
image_config = images_config.setdefault(context.image_name, {})
image_status = image_config.setdefault("status", {})
assert context.image is not None
image_status["size"] = list(context.image.shape[:2][::-1])
context.init_mask()
histogram(context)
level(context)
color_cut(context)
cut(context)
deskew(context)
docrop(context)
sharpen(context)
dither(context)
autorotate(context)
# Is empty ?
contours = find_contours(
context.get_masked(), context, context.get_process_count(), "is-empty", "empty"
)
if not contours:
print(f"Ignore image with no content: {image}")
continue
if config["args"].setdefault("assisted_split", schema.ASSISTED_SPLIT_DEFAULT):
assisted_split: schema.AssistedSplit = {}
name = os.path.join(root_folder, context.image_name)
source = context.save_progress_images("assisted-split", context.image, force=True)
assert source
assisted_split["source"] = source
config["assisted_split"].append(assisted_split)
destinations = [len(step["sources"]) * 2 - index, index + 1]
if index % 2 == 1:
destinations.reverse()
assisted_split["destinations"] = list(destinations)
limits = []
assert context.image is not None
contours = find_contours(context.image, context, context.get_process_count(), "limits", "limit")
vertical_limits_context = find_limits(context.image, True, context, contours)
horizontal_limits_context = find_limits(context.image, False, context, contours)
for contour_limit in contours:
draw_rectangle(context.image, contour_limit, False)
limits.extend(fill_limits(context.image, True, *vertical_limits_context))
limits.extend(fill_limits(context.image, False, *horizontal_limits_context))
assisted_split["limits"] = limits
rule_config = config["args"].setdefault("rule", {})
if rule_config.setdefault("enable", schema.RULE_ENABLE_DEFAULT):
minor_graduation_space = rule_config.setdefault(
"minor_graduation_space", schema.RULE_MINOR_GRADUATION_SPACE_DEFAULT
)
major_graduation_space = rule_config.setdefault(
"major_graduation_space", schema.RULE_MAJOR_GRADUATION_SPACE_DEFAULT
)
lines_space = rule_config.setdefault("lines_space", schema.RULE_LINES_SPACE_DEFAULT)
minor_graduation_size = rule_config.setdefault(
"minor_graduation_size", schema.RULE_MINOR_GRADUATION_SIZE_DEFAULT
)
major_graduation_size = rule_config.setdefault(
"major_graduation_size", schema.RULE_MAJOR_GRADUATION_SIZE_DEFAULT
)
graduation_color = rule_config.setdefault(
"graduation_color", schema.RULE_GRADUATION_COLOR_DEFAULT
)
lines_color = rule_config.setdefault("lines_color", schema.RULE_LINES_COLOR_DEFAULT)
lines_opacity = rule_config.setdefault("lines_opacity", schema.RULE_LINES_OPACITY_DEFAULT)
graduation_text_font_filename = rule_config.setdefault(
"graduation_text_font_filename", schema.RULE_GRADUATION_TEXT_FONT_FILENAME_DEFAULT
)
graduation_text_font_size = rule_config.setdefault(
"graduation_text_font_size", schema.RULE_GRADUATION_TEXT_FONT_SIZE_DEFAULT
)
graduation_text_font_color = rule_config.setdefault(
"graduation_text_font_color", schema.RULE_GRADUATION_TEXT_FONT_COLOR_DEFAULT
)
graduation_text_margin = rule_config.setdefault(
"graduation_text_margin", schema.RULE_GRADUATION_TEXT_MARGIN_DEFAULT
)
x = minor_graduation_space
while x < context.image.shape[1]:
if x % lines_space == 0:
sub_img = context.image[0 : context.image.shape[0], x : x + 1]
mask_image = np.zeros(sub_img.shape, dtype=np.uint8)
mask_image[:, :] = lines_color
opacity_result = cv2.addWeighted(
sub_img, 1 - lines_opacity, mask_image, lines_opacity, 1.0
)
if opacity_result is not None:
context.image[0 : context.image.shape[0], x : x + 1] = opacity_result
if x % major_graduation_space == 0:
cv2.rectangle(
context.image, (x, 0), (x + 1, major_graduation_size), graduation_color, -1
)
else:
cv2.rectangle(
context.image, (x, 0), (x + 1, minor_graduation_size), graduation_color, -1
)
x += minor_graduation_space
y = minor_graduation_space
while y < context.image.shape[0]:
if y % lines_space == 0:
sub_img = context.image[y : y + 1, 0 : context.image.shape[1]]
mask_image = np.zeros(sub_img.shape, dtype=np.uint8)
mask_image[:, :] = lines_color
opacity_result = cv2.addWeighted(
sub_img, 1 - lines_opacity, mask_image, lines_opacity, 1.0
)
if opacity_result is not None:
context.image[y : y + 1, 0 : context.image.shape[1]] = opacity_result
if y % major_graduation_space == 0:
cv2.rectangle(
context.image, (0, y), (major_graduation_size, y + 1), graduation_color, -1
)
else:
cv2.rectangle(
context.image, (0, y), (minor_graduation_size, y + 1), graduation_color, -1
)
y += minor_graduation_space
pil_image = Image.fromarray(context.image)
font = ImageFont.truetype(font=graduation_text_font_filename, size=graduation_text_font_size)
draw = ImageDraw.Draw(pil_image)
x = major_graduation_space
print(graduation_text_font_color)
while x < context.image.shape[1]:
draw.text(
(x + graduation_text_margin, major_graduation_size),
f"{x}",
fill=tuple(graduation_text_font_color),
anchor="lb",
font=font,
)
x += major_graduation_space
pil_image = pil_image.rotate(-90, expand=True)
draw = ImageDraw.Draw(pil_image)
y = major_graduation_space
while y < context.image.shape[0]:
draw.text(
(context.image.shape[0] - y + graduation_text_margin, major_graduation_size),
f"{y}",
fill=tuple(graduation_text_font_color),
anchor="lb",
font=font,
)
y += major_graduation_space
pil_image = pil_image.rotate(90, expand=True)
context.image = np.array(pil_image)
cv2.imwrite(name, context.image)
assisted_split["image"] = context.image_name
images.append(name)
else:
img2 = os.path.join(root_folder, context.image_name)
cv2.imwrite(img2, context.image)
images.append(img2)
process_count = context.process_count
progress = os.environ.get("PROGRESS", "FALSE") == "TRUE"
count = context.get_process_count()
for image in images:
if progress:
_save_progress(context.root_folder, count, "finalize", os.path.basename(image), image)
if config["args"].setdefault("colors", schema.COLORS_DEFAULT):
count = context.get_process_count()
for image in images:
call(CONVERT + ["-colors", str(config["args"]["colors"]), image, image])
if progress:
_save_progress(context.root_folder, count, "colors", os.path.basename(image), image)
if not config["args"].setdefault("jpeg", False) and config["args"].setdefault(
"run_pngquant", schema.RUN_PNGQUANT_DEFAULT
):
count = context.get_process_count()
for image in images:
with tempfile.NamedTemporaryFile(suffix=".png") as temp_file:
call(
["pngquant", f"--output={temp_file.name}"]
+ config["args"].setdefault(
"pngquant_options",
schema.PNGQUANT_OPTIONS_DEFAULT,
)
+ ["--", image],
check=False,
)
if os.path.getsize(temp_file.name) > 0:
call(["cp", temp_file.name, image])
if progress:
_save_progress(context.root_folder, count, "pngquant", os.path.basename(image), image)
if not config["args"].setdefault("jpeg", schema.JPEG_DEFAULT) and config["args"].setdefault(
"run_optipng", not config["args"]["run_pngquant"]
):
count = context.get_process_count()
for image in images:
call(["optipng", image], check=False)
if progress:
_save_progress(context.root_folder, count, "optipng", os.path.basename(image), image)
if config["args"].setdefault("jpeg", schema.JPEG_DEFAULT):
count = context.get_process_count()
new_images = []
for image in images:
name = os.path.splitext(os.path.basename(image))[0]
jpeg_img = f"{name}.jpeg"
subprocess.run( # nosec
[
"gm",
"convert",
image,
"-quality",
str(config["args"].setdefault("jpeg_quality", schema.JPEG_QUALITY_DEFAULT)),
jpeg_img,
],
check=True,
)
new_images.append(jpeg_img)
if progress:
_save_progress(context.root_folder, count, "to-jpeg", os.path.basename(image), image)
images = new_images
return {
"sources": images,
"name": "split"
if config["args"].setdefault("assisted_split", schema.ASSISTED_SPLIT_DEFAULT)
else "finalize",
"process_count": process_count,
}
def _save_progress(root_folder: Optional[str], count: int, name: str, image_name: str, image: str) -> None:
assert root_folder
name = f"{count}-{name}"
dest_folder = os.path.join(root_folder, name)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
dest_image = os.path.join(dest_folder, image_name)
try:
call(["cp", image, dest_image])
except Exception as exception:
print(exception)
def save(context: Context, root_folder: str, image: str, folder: str, force: bool = False) -> str:
"""Save the current image in a subfolder if progress mode in enabled."""
if force or context.is_progress():
dest_folder = os.path.join(root_folder, folder)
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
dest_file = os.path.join(dest_folder, os.path.basename(image))
shutil.copyfile(image, dest_file)
return dest_file
return image
class Item(TypedDict, total=False):
"""
Image content and position.
Used to create the final document
"""
pos: int
file: IO[bytes]
def split(
config: schema.Configuration,
step: schema.Step,
root_folder: str,
) -> schema.Step:
"""Split an image using the assisted split instructions."""
process_count = 0
for assisted_split in config["assisted_split"]:
if assisted_split["limits"]:
nb_horizontal = 1
nb_vertical = 1
for limit in assisted_split["limits"]:
if limit["vertical"]:
nb_vertical += 1
else:
nb_horizontal += 1
if nb_vertical * nb_horizontal != len(assisted_split["destinations"]):
raise ScanToPaperlessException(
f"Wrong number of destinations ({len(assisted_split['destinations'])}), "
f"vertical: {nb_horizontal}, height: {nb_vertical}, image: '{assisted_split['source']}'"
)
for assisted_split in config["assisted_split"]:
if "image" in assisted_split:
image_path = os.path.join(root_folder, assisted_split["image"])
if os.path.exists(image_path):
os.unlink(image_path)
append: Dict[Union[str, int], List[Item]] = {}
transformed_images = []
for assisted_split in config["assisted_split"]:
image = assisted_split["source"]
context = Context(config, step)
width, height = (
int(e) for e in output(CONVERT + [image, "-format", "%w %h", "info:-"]).strip().split(" ")
)
horizontal_limits = [limit for limit in assisted_split["limits"] if not limit["vertical"]]
vertical_limits = [limit for limit in assisted_split["limits"] if limit["vertical"]]
last_y = 0
number = 0
for horizontal_number in range(len(horizontal_limits) + 1):
if horizontal_number < len(horizontal_limits):
horizontal_limit = horizontal_limits[horizontal_number]
horizontal_value = horizontal_limit["value"]
horizontal_margin = horizontal_limit["margin"]
else:
horizontal_value = height
horizontal_margin = 0
last_x = 0
for vertical_number in range(len(vertical_limits) + 1):
destination = assisted_split["destinations"][number]
if destination == "-" or destination is None:
if vertical_number < len(vertical_limits):
last_x = (
vertical_limits[vertical_number]["value"]
+ vertical_limits[vertical_number]["margin"]
)
else:
if vertical_number < len(vertical_limits):
vertical_limit = vertical_limits[vertical_number]
vertical_value = vertical_limit["value"]
vertical_margin = vertical_limit["margin"]
else:
vertical_value = width
vertical_margin = 0
process_file = tempfile.NamedTemporaryFile( # pylint: disable=consider-using-with
suffix=".png"
)
call(
CONVERT
+ [
"-crop",
f"{vertical_value - vertical_margin - last_x}x"
f"{horizontal_value - horizontal_margin - last_y}+{last_x}+{last_y}",
"+repage",
image,
process_file.name,
]
)
last_x = vertical_value + vertical_margin
if re.match(r"[0-9]+\.[0-9]+", str(destination)):
page, page_pos = (int(e) for e in str(destination).split("."))
else:
page = int(destination)
page_pos = 0
save(context, root_folder, process_file.name, f"{context.get_process_count()}-split")
margin_horizontal = context.get_px_value(
"margin_horizontal", schema.MARGIN_HORIZONTAL_DEFAULT
)
margin_vertical = context.get_px_value("margin_vertical", schema.MARGIN_VERTICAL_DEFAULT)
context.image = cv2.imread(process_file.name)
if not context.config["args"].setdefault("no_crop", schema.NO_CROP_DEFAULT):
crop(context, int(round(margin_horizontal)), int(round(margin_vertical)))
process_file = tempfile.NamedTemporaryFile( # pylint: disable=consider-using-with
suffix=".png"
)
cv2.imwrite(process_file.name, context.image)
save(context, root_folder, process_file.name, f"{context.get_process_count()}-crop")
if page not in append:
append[page] = []
append[page].append({"file": process_file, "pos": page_pos})
number += 1
last_y = horizontal_value + horizontal_margin
process_count = context.process_count
for page_number in sorted(append.keys()):
items: List[Item] = append[page_number]
vertical = len(horizontal_limits) == 0
if not vertical and len(vertical_limits) != 0 and len(items) > 1:
raise ScanToPaperlessException(f"Mix of limit type for page '{page_number}'")
with tempfile.NamedTemporaryFile(suffix=".png") as process_file:
call(
CONVERT
+ [e["file"].name for e in sorted(items, key=lambda e: e["pos"])]
+ [
"-background",
"#ffffff",
"-gravity",
"center",
"+append" if vertical else "-append",
process_file.name,
]
)
save(context, root_folder, process_file.name, f"{process_count}-split")
img2 = os.path.join(root_folder, f"image-{page_number}.png")
call(CONVERT + [process_file.name, img2])
transformed_images.append(img2)
process_count += 1
return {"sources": transformed_images, "name": "finalize", "process_count": process_count}
def finalize(
config: schema.Configuration,
step: schema.Step,
root_folder: str,
) -> None:
"""
Do final step on document generation.
convert in one pdf and copy with the right name in the consume folder
"""
destination = os.path.join(
os.environ.get("SCAN_CODES_FOLDER", "/scan-codes"), f"{os.path.basename(root_folder)}.pdf"
)
if os.path.exists(destination):
return
images = step["sources"]
if config["args"].setdefault("append_credit_card", schema.APPEND_CREDIT_CARD_DEFAULT):
images2 = []
for image in images:
if os.path.exists(image):
images2.append(image)
file_name = os.path.join(root_folder, "append.png")
call(CONVERT + images2 + ["-background", "#ffffff", "-gravity", "center", "-append", file_name])
# To stack vertically (img1 over img2):
# vis = np.concatenate((img1, img2), axis=0)
# To stack horizontally (img1 to the left of img2):
# vis = np.concatenate((img1, img2), axis=1)
images = [file_name]
pdf = []
for image in images:
if os.path.exists(image):
name = os.path.splitext(os.path.basename(image))[0]
file_name = os.path.join(root_folder, f"{name}.pdf")
if config["args"].setdefault("tesseract", schema.TESSERACT_DEFAULT):
with open(file_name, "w", encoding="utf8") as output_file:
process = run(
[
"tesseract",
"--dpi",
str(config["args"].setdefault("dpi", schema.DPI_DEFAULT)),
"-l",
config["args"].setdefault("tesseract_lang", schema.TESSERACT_LANG_DEFAULT),
image,
"stdout",
"pdf",
],
stdout=output_file,
)
if process.stderr:
print(process.stderr)
else:
call(CONVERT + [image, "+repage", file_name])
pdf.append(file_name)
tesseract_producer = None
if pdf:
with pikepdf.open(pdf[0]) as pdf_:
if tesseract_producer is None and pdf_.docinfo.get("/Producer") is not None:
tesseract_producer = json.loads(pdf_.docinfo.get("/Producer").to_json()) # type: ignore
if "tesseract" not in tesseract_producer.lower():
tesseract_producer = None
elif tesseract_producer.startswith("u:"):
tesseract_producer = tesseract_producer[2:]
if tesseract_producer is None:
with pdf_.open_metadata() as meta:
if "{http://purl.org/dc/elements/1.1/}producer" in meta:
tesseract_producer = meta["{http://purl.org/dc/elements/1.1/}producer"]
if "tesseract" not in tesseract_producer.lower():
tesseract_producer = None
progress = os.environ.get("PROGRESS", "FALSE") == "TRUE"
if progress:
for pdf_file in pdf:
basename = os.path.basename(pdf_file).split(".")
call(
[
"cp",
pdf_file,
os.path.join(root_folder, f"1-{'.'.join(basename[:-1])}-tesseract.{basename[-1]}"),
]
)
count = 1
with tempfile.NamedTemporaryFile(suffix=".png") as temporary_pdf:
call(["pdftk"] + pdf + ["output", temporary_pdf.name, "compress"])
if progress:
call(["cp", temporary_pdf.name, os.path.join(root_folder, f"{count}-pdftk.pdf")])
count += 1
if config["args"].setdefault("run_exiftool", schema.RUN_EXIFTOOL_DEFAULT):
call(["exiftool", "-overwrite_original_in_place", temporary_pdf.name])
if progress:
call(["cp", temporary_pdf.name, os.path.join(root_folder, f"{count}-exiftool.pdf")])
count += 1
if config["args"].setdefault("run_ps2pdf", schema.RUN_PS2PDF_DEFAULT):
with tempfile.NamedTemporaryFile(suffix=".png") as temporary_ps2pdf:
call(["ps2pdf", temporary_pdf.name, temporary_ps2pdf.name])
if progress:
call(["cp", temporary_ps2pdf.name, f"{count}-ps2pdf.pdf"])
count += 1
call(["cp", temporary_ps2pdf.name, temporary_pdf.name])
with pikepdf.open(temporary_pdf.name, allow_overwriting_input=True) as pdf_:
scan_to_paperless_meta = f"Scan to Paperless {os.environ.get('VERSION', 'undefined')}"
with pdf_.open_metadata() as meta:
meta["{http://purl.org/dc/elements/1.1/}creator"] = (
f"{scan_to_paperless_meta}, {tesseract_producer}"
if tesseract_producer
else scan_to_paperless_meta
)
pdf_.save(temporary_pdf.name)
if progress:
call(["cp", temporary_pdf.name, os.path.join(root_folder, f"{count}-pikepdf.pdf")])
count += 1
call(["cp", temporary_pdf.name, destination])
def process_code() -> None:
"""Detect ad add a page with the QR codes."""
for pdf_filename in glob.glob(os.path.join(os.environ.get("SCAN_CODES_FOLDER", "/scan-codes"), "*.pdf")):
destination_filename = os.path.join(
os.environ.get("SCAN_FINAL_FOLDER", "/destination"), os.path.basename(pdf_filename)
)
if os.path.exists(destination_filename):
continue
try:
_LOG.info("Processing codes for %s", pdf_filename)
code.add_codes(
pdf_filename,
destination_filename,
dpi=float(os.environ.get("SCAN_CODES_DPI", 200)),
pdf_dpi=float(os.environ.get("SCAN_CODES_PDF_DPI", 72)),
font_name=os.environ.get("SCAN_CODES_FONT_NAME", "Helvetica-Bold"),
font_size=float(os.environ.get("SCAN_CODES_FONT_SIZE", 16)),
margin_top=float(os.environ.get("SCAN_CODES_MARGIN_TOP", 0)),
margin_left=float(os.environ.get("SCAN_CODES_MARGIN_LEFT", 2)),
)
if os.path.exists(destination_filename):
# Remove the source file on success
os.remove(pdf_filename)
_LOG.info("Down processing codes for %s", pdf_filename)
except Exception as exception:
_LOG.exception("Error while processing %s: %s", pdf_filename, str(exception))
def is_sources_present(images: List[str], root_folder: str) -> bool:
"""Are sources present for the next step."""
for image in images:
if not os.path.exists(os.path.join(root_folder, image)):
print(f"Missing {root_folder} - {image}")
return False
return True
def save_config(config: schema.Configuration, config_file_name: str) -> None:
"""Save the configuration."""
yaml = YAML()
yaml.default_flow_style = False
with open(config_file_name + "_", "w", encoding="utf-8") as config_file:
yaml.dump(config, config_file)
os.rename(config_file_name + "_", config_file_name)
def _process(config_file_name: str, dirty: bool = False, print_waiting: bool = True) -> Tuple[bool, bool]:
"""Propcess one document."""
if not os.path.exists(config_file_name):
return dirty, print_waiting
root_folder = os.path.dirname(config_file_name)
if os.path.exists(os.path.join(root_folder, "error.yaml")):
return dirty, print_waiting
yaml = YAML()
yaml.default_flow_style = False
with open(config_file_name, encoding="utf-8") as config_file:
config: schema.Configuration = yaml.load(config_file.read())
if config is None:
print(config_file_name)
print("Empty config")
print_waiting = True
return dirty, print_waiting
if not is_sources_present(config["images"], root_folder):
print(config_file_name)
print("Missing images")
print_waiting = True
return dirty, print_waiting
try:
rerun = False
if "steps" not in config:
rerun = True
while config.get("steps") and not is_sources_present(config["steps"][-1]["sources"], root_folder):
config["steps"] = config["steps"][:-1]
save_config(config, config_file_name)
if os.path.exists(os.path.join(root_folder, "REMOVE_TO_CONTINUE")):
os.remove(os.path.join(root_folder, "REMOVE_TO_CONTINUE"))
print(config_file_name)
print("Rerun step")
print_waiting = True
rerun = True
if "steps" not in config or not config["steps"]:
step: schema.Step = {
"sources": config["images"],
"name": "transform",
}
config["steps"] = [step]
step = config["steps"][-1]
if is_sources_present(step["sources"], root_folder):
if os.path.exists(os.path.join(root_folder, "REMOVE_TO_CONTINUE")) and not rerun:
return dirty, print_waiting
if os.path.exists(os.path.join(root_folder, "DONE")) and not rerun:
return dirty, print_waiting
print(config_file_name)
print_waiting = True
dirty = True
done = False
next_step = None
if step["name"] == "transform":
print("Transform")
next_step = transform(config, step, config_file_name, root_folder)
elif step["name"] == "split":
print("Split")
next_step = split(config, step, root_folder)
elif step["name"] == "finalize":
print("Finalize")
finalize(config, step, root_folder)
done = True
if done and os.environ.get("PROGRESS", "FALSE") != "TRUE":
shutil.rmtree(root_folder)
else:
if next_step is not None:
config["steps"].append(next_step)
save_config(config, config_file_name)
with open(
os.path.join(root_folder, "DONE" if done else "REMOVE_TO_CONTINUE"),
"w",
encoding="utf-8",
):
pass
except Exception as exception:
print(exception)
trace = traceback.format_exc()
print(trace)
print_waiting = True
out = {"error": str(exception), "traceback": trace.split("\n")}
for attribute in ("returncode", "cmd"):
if hasattr(exception, attribute):
out[attribute] = getattr(exception, attribute)
for attribute in ("output", "stdout", "stderr"):
if hasattr(exception, attribute):
if getattr(exception, attribute):
out[attribute] = getattr(exception, attribute).decode()
yaml = YAML(typ="safe")
yaml.default_flow_style = False
try:
with open(os.path.join(root_folder, "error.yaml"), "w", encoding="utf-8") as error_file:
yaml.dump(out, error_file)
except Exception as exception2:
print(exception2)
print(traceback.format_exc())
yaml = YAML()
yaml.default_flow_style = False
with open(os.path.join(root_folder, "error.yaml"), "w", encoding="utf-8") as error_file:
yaml.dump(out, error_file)
return dirty, print_waiting
def main() -> None:
"""Process the scanned documents."""
parser = argparse.ArgumentParser("Process the scanned documents.")
parser.add_argument("config", nargs="?", help="The config file to process.")
args = parser.parse_args()
if args.config:
_process(args.config)
sys.exit()
print("Welcome to scanned images document to paperless.")
print_waiting = True
while True:
dirty = False
for config_file_name in glob.glob(
os.path.join(os.environ.get("SCAN_SOURCE_FOLDER", "/source"), "*/config.yaml")
):
dirty, print_waiting = _process(config_file_name, dirty, print_waiting)
if not dirty:
process_code()
sys.stdout.flush()
if not dirty:
if print_waiting:
print_waiting = False
print("Waiting...")
time.sleep(30)
if __name__ == "__main__":
main()
|
/scan_to_paperless-1.25.0-py3-none-any.whl/scan_to_paperless/process.py
| 0.659844 | 0.23053 |
process.py
|
pypi
|
import attr
from collections import Counter
from licensedcode.tokenize import query_tokenizer
# All values of match_coverage less than this value are taken as
# `near-perfect-match-coverage` cases
NEAR_PERFECT_MATCH_COVERAGE_THR = 100
# Values of match_coverage less than this are taken as `imperfect-match-coverage` cases
IMPERFECT_MATCH_COVERAGE_THR = 95
# How many Lines in between has to be present for two matches being of a different group
# (i.e. and therefore, different rule)
LINES_THRESHOLD = 4
# Threshold Values of start line and rule length for a match to likely be a false positive
# (more than the start_line threshold and less than the rule_length threshold)
FALSE_POSITIVE_START_LINE_THRESHOLD = 1000
FALSE_POSITIVE_RULE_LENGTH_THRESHOLD = 3
# Whether to Use the NLP BERT Models
USE_LICENSE_CASE_BERT_MODEL = False
USE_FALSE_POSITIVE_BERT_MODEL = False
ISSUE_CASES_VERSION = 0.1
ISSUE_CATEGORIES = {
"imperfect-match-coverage": (
"The license detection is inconclusive with high confidence, because only "
"a small part of the rule text is matched."
),
"near-perfect-match-coverage": (
"The license detection is conclusive with a medium confidence because "
"because most, but not all of the rule text is matched."
),
"extra-words": (
"The license detection is conclusive with high confidence because all the "
"rule text is matched, but some unknown extra words have been inserted in "
"the text."
),
"false-positive": (
"The license detection is inconclusive, and is unlikely to be about a "
"license as a piece of code/text is detected.",
),
"unknown-match": (
"The license detection is inconclusive, as the license matches have "
"been matched to rules having unknown as their license key"
),
}
@attr.s
class IssueType:
ANALYSIS_CONFIDENCES = {
"high": "High confidence",
"medium": "Medium confidence",
"low": "Low confidence",
}
classification_id = attr.ib(type=str)
classification_description = attr.ib(type=str)
analysis_confidence = attr.ib(
type=str, validator=attr.validators.in_(ANALYSIS_CONFIDENCES)
)
is_license_text = attr.ib(default=False)
is_license_notice = attr.ib(default=False)
is_license_tag = attr.ib(default=False)
is_license_reference = attr.ib(default=False)
is_license_intro = attr.ib(default=False)
is_suggested_matched_text_complete = attr.ib(default=True)
ISSUE_TYPES_BY_CLASSIFICATION = {
"text-legal-lic-files": IssueType(
is_license_text=True,
classification_id="text-legal-lic-files",
classification_description=(
"The matched text is present in a file whose name is a known "
"legal filename."
),
analysis_confidence="high",
is_suggested_matched_text_complete=False,
),
"text-non-legal-lic-files": IssueType(
is_license_text=True,
classification_id="text-non-legal-lic-files",
classification_description=(
"The matched license text is present in a file whose name is not "
"a known legal filename."
),
analysis_confidence="medium",
is_suggested_matched_text_complete=False,
),
"text-lic-text-fragments": IssueType(
is_license_text=True,
classification_id="text-lic-text-fragments",
classification_description=(
"Only parts of a larger license text are detected."
),
analysis_confidence="low",
is_suggested_matched_text_complete=False,
),
"notice-and-or-with-notice": IssueType(
is_license_notice=True,
classification_id="notice-and-or-with-notice",
classification_description=(
"A notice with a complex license expression "
"(i.e. exceptions, choices or combinations)."
),
analysis_confidence="medium",
),
"notice-single-key-notice": IssueType(
is_license_notice=True,
classification_id="notice-single-key-notice",
classification_description="A notice with a single license.",
analysis_confidence="high",
),
"notice-has-unknown-match": IssueType(
is_license_notice=True,
classification_id="notice-has-unknown-match",
classification_description=(
"License notices with unknown licenses detected."
),
analysis_confidence="medium",
),
"notice-false-positive": IssueType(
is_license_notice=True,
classification_id="notice-has-unknown-match",
classification_description=(
"A piece of code/text is incorrectly detected as a license."
),
analysis_confidence="medium",
),
"tag-tag-coverage": IssueType(
is_license_tag=True,
classification_id="tag-tag-coverage",
classification_description="A part of a license tag is detected",
analysis_confidence="high",
),
"tag-other-tag-structures": IssueType(
is_license_tag=True,
classification_id="tag-other-tag-structures",
classification_description=(
"A new/common structure of tags are detected with scope for being "
"handled differently."
),
analysis_confidence="high",
),
"tag-false-positive": IssueType(
is_license_tag=True,
classification_id="tag-other-tag-structures",
classification_description=(
"A piece of code/text is incorrectly detected as a license."
),
analysis_confidence="medium",
),
# `reference` sub-cases
"reference-lead-in-or-unknown-refs": IssueType(
is_license_reference=True,
classification_id="reference-lead-in-or-unknown-refs",
classification_description=(
"Lead-ins to known license references are detected."
),
analysis_confidence="medium",
),
"reference-low-coverage-refs": IssueType(
is_license_reference=True,
classification_id="reference-low-coverage-refs",
classification_description="License references with a incomplete match.",
analysis_confidence="medium",
),
"reference-to-local-file": IssueType(
is_license_reference=True,
classification_id="reference-to-local-file",
classification_description=(
"Matched to an unknown rule as the license information is present in "
"another file, which is referred to in this matched piece of text."
),
analysis_confidence="high",
),
"reference-false-positive": IssueType(
is_license_reference=True,
classification_id="reference-false-positive",
classification_description=(
"A piece of code/text is incorrectly detected as a license."
),
analysis_confidence="medium",
),
"intro-unknown-match": IssueType(
is_license_reference=True,
classification_id="intro-unknown-match",
classification_description=(
"A piece of common introduction to a license text/notice/reference is "
"detected."
),
analysis_confidence="medium",
),
}
@attr.s
class SuggestedLicenseMatch:
"""
After analysis of a license detection issue, an alternate license detection is
suggested which attempts to rectify the issues.
"""
license_expression = attr.ib(type=str)
matched_text = attr.ib(type=str)
@attr.s
class FileRegion:
"""
A file has one or more file-regions, which are separate regions of the file
containing some license information (separated by code/text/others in between),
and identified by a start line and an end line.
"""
path = attr.ib(type=str)
start_line = attr.ib(type=int)
end_line = attr.ib(type=int)
@attr.s
class LicenseDetectionIssue:
"""
An LicenseDetectionIssue object corresponds to a license detection issue for a
file-region, containing one/multiple license matches.
A file has one or more file-regions, which are separate regions of the file
containing some license information (separated by code/text/others in between),
and identified by a start line and an end line.
"""
issue_category = attr.ib(type=str, validator=attr.validators.in_(ISSUE_CATEGORIES))
issue_description = attr.ib(type=str)
issue_type = attr.ib()
suggested_license = attr.ib()
original_licenses = attr.ib()
file_regions = attr.ib(default=attr.Factory(list))
def to_dict(self, is_summary=True):
if is_summary:
return attr.asdict(
self, filter=lambda attr, value: attr.name not in ["file_regions"],
)
else:
return attr.asdict(
self, filter=lambda attr, value: attr.name not in ["path"],
)
@property
def identifier(self):
"""
This is an identifier for a issue, based on it's underlying license matches.
"""
data = []
for license_match in self.original_licenses:
identifier = (license_match.rule_identifier, license_match.match_coverage,)
data.append(identifier)
return tuple(data)
@property
def identifier_for_unknown_intro(self):
"""
This is an identifier for a issue, which is an unknown license intro,
based on it's underlying license matches.
"""
data = []
for license_match in self.original_licenses:
tokenized_matched_text = tuple(query_tokenizer(license_match.matched_text))
identifier = (
license_match.rule_identifier,
license_match.match_coverage,
tokenized_matched_text,
)
data.append(identifier)
return tuple(data)
@staticmethod
def format_analysis_result(issue_category, issue_type, license_matches, path):
"""
Format the analysis result to generate an LicenseDetectionIssue object for
this license detection issue.
:param issue_category: str
One of ISSUE_CATEGORIES.
:param issue_type: str
One of ISSUE_TYPES_BY_CLASSIFICATION.
:param license_matches: list
All matches for a license detection issue (for a file-region), each a
LicenseMatch object.
:param path: str
Path of the resource where the license issue exists
"""
# Don't generate LicenseDetectionIssue objects for correct License Detections.
if issue_category == "correct-license-detection":
return None
start_line, end_line = get_start_end_line(license_matches)
license_expression, matched_text = get_license_match_suggestion(
license_matches, issue_category, issue_type
)
license_detection_issue = LicenseDetectionIssue(
issue_category=issue_category,
issue_description=ISSUE_CATEGORIES[issue_category],
issue_type=ISSUE_TYPES_BY_CLASSIFICATION[issue_type],
suggested_license=SuggestedLicenseMatch(
license_expression=license_expression, matched_text=matched_text
),
original_licenses=license_matches,
file_regions=[FileRegion(
path=path,
start_line=start_line,
end_line=end_line,
)],
)
modify_analysis_confidence(license_detection_issue)
return license_detection_issue
@staticmethod
def from_license_matches(
license_matches, path=None, is_license_text=False, is_legal=False,
):
"""
Group `license_matches` into file-regions and for each license detection issue,
return a LicenseDetectionIssue object containing the issue, issue type and
suggested license match, with the original reported matches.
:param license_matches: list
List of LicenseMatch.
:param path: str
Path of the resource where the license issue exists
:param is_license_text: bool
True if most of a file is license text.
:param is_legal: bool
True if the file has a common legal name.
"""
if not license_matches:
return []
if not is_license_text:
groups_of_license_matches = group_matches(license_matches)
else:
groups_of_license_matches = [license_matches]
return analyze_matches(
groups_of_license_matches, path, is_license_text, is_legal
)
def is_correct_detection(license_matches):
"""
Return True if all the license matches in a file-region are correct
license detections, as they are either SPDX license tags, or the file content has
a exact match with a license hash.
:param license_matches: list
List of LicenseMatch.
"""
matchers = (license_match.matcher for license_match in license_matches)
return all(matcher in ("1-hash", "4-spdx-id") for matcher in matchers)
def is_match_coverage_less_than_threshold(license_matches, threshold):
"""
Returns True if any of the license matches in a file-region has a `match_coverage`
value below the threshold.
:param license_matches: list
List of LicenseMatch.
:param threshold: int
A `match_coverage` threshold value in between 0-100
"""
coverage_values = (
license_match.match_coverage for license_match in license_matches
)
return any(coverage_value < threshold for coverage_value in coverage_values)
def calculate_query_coverage_coefficient(license_match):
"""
Calculates a `query_coverage_coefficient` value for that match. For a match:
1. If this value is 0, i.e. `score`==`match_coverage`*`rule_Relevance`, then
there are no extra words in that license match.
2. If this value is a +ve number, i.e. `score`!=`match_coverage`*`rule_Relevance`,
then there are extra words in that match.
:param matched_license: LicenseMatch.
"""
score_coverage_relevance = (
license_match.match_coverage * license_match.rule_relevance
) / 100
return score_coverage_relevance - license_match.score
def is_extra_words(license_matches):
"""
Return True if any of the license matches in a file-region has extra words. Having
extra words means contains a perfect match with a license/rule, but there are some
extra words in addition to the matched text.
:param license_matches: list
List of LicenseMatch.
"""
match_query_coverage_diff_values = (
calculate_query_coverage_coefficient(license_match)
for license_match in license_matches
)
return any(
match_query_coverage_diff_value > 0
for match_query_coverage_diff_value in match_query_coverage_diff_values
)
def is_false_positive(license_matches):
"""
Return True if all of the license matches in a file-region are false positives.
False Positive occurs when other text/code is falsely matched to a license rule,
because it matches with a one-word license rule with it's `is_license_tag` value as
True. Note: Usually if it's a false positive, there's only one match in that region.
:param license_matches: list
List of LicenseMatch.
"""
start_line_region = min(
license_match.start_line for license_match in license_matches
)
match_rule_length_values = [
license_match.rule_length for license_match in license_matches
]
if start_line_region > FALSE_POSITIVE_START_LINE_THRESHOLD and any(
match_rule_length_value <= FALSE_POSITIVE_RULE_LENGTH_THRESHOLD
for match_rule_length_value in match_rule_length_values
):
return True
match_is_license_tag_flags = (
license_match.is_license_tag for license_match in license_matches
)
return all(
(is_license_tag_flag and match_rule_length == 1)
for is_license_tag_flag, match_rule_length in zip(
match_is_license_tag_flags, match_rule_length_values
)
)
def has_unknown_matches(license_matches):
"""
Return True if any on the license matches has a license match with an
`unknown` rule identifier.
:param license_matches: list
List of LicenseMatch.
"""
match_rule_identifiers = (
license_match.rule_identifier for license_match in license_matches
)
match_rule_license_expressions = (
license_match.license_expression for license_match in license_matches
)
return any(
"unknown" in identifier for identifier in match_rule_identifiers
) or any(
"unknown" in license_expression
for license_expression in match_rule_license_expressions
)
def get_analysis_for_region(license_matches):
"""
Analyse license matches from a file-region, and determine if the license detection
in that file region is correct or it is wrong/partially-correct/false-positive or
has extra words.
:param license_matches: list
List of LicenseMatch.
"""
# Case where all matches have `matcher` as `1-hash` or `4-spdx-id`
if is_correct_detection(license_matches):
return "correct-license-detection"
# Case where at least one of the matches have `match_coverage`
# below IMPERFECT_MATCH_COVERAGE_THR
elif is_match_coverage_less_than_threshold(
license_matches, IMPERFECT_MATCH_COVERAGE_THR
):
return "imperfect-match-coverage"
# Case where at least one of the matches have `match_coverage`
# below NEAR_PERFECT_MATCH_COVERAGE_THR
elif is_match_coverage_less_than_threshold(
license_matches, NEAR_PERFECT_MATCH_COVERAGE_THR
):
return "near-perfect-match-coverage"
# Case where at least one of the match have extra words
elif is_extra_words(license_matches):
return "extra-words"
# Case where even though the matches have perfect coverage, they have
# matches with `unknown` rule identifiers
elif has_unknown_matches(license_matches):
return "unknown-match"
# Case where the match is a false positive
elif is_false_positive(license_matches):
if not USE_FALSE_POSITIVE_BERT_MODEL:
return "false-positive"
else:
return determine_false_positive_case_using_bert(license_matches)
# Cases where Match Coverage is a perfect 100 for all matches
else:
return "correct-license-detection"
def is_license_case(license_matches, license_case):
"""
Get the type of license_match_case for a group of license matches in a file-region.
:param license_matches: list
List of LicenseMatch.
:param license_case: string
One of the 4 boolean flag attributes of a match, i.e. is it text/notice/tag/ref
"""
match_is_license_case_flags = (
getattr(license_match, license_case) for license_match in license_matches
)
return any(
match_is_license_case for match_is_license_case in match_is_license_case_flags
)
def get_issue_rule_type(license_matches, is_license_text, is_legal):
"""
For a group of matches (with some issue) in a file-region, classify them into
groups according to their potential license rule type (text/notice/tag/reference).
:param license_matches: list
List of LicenseMatch.
:param is_license_text: bool
:param is_legal: bool
"""
# Case where at least one of the matches is matched to a license `text` rule.
if (
is_license_text
or is_legal
or is_license_case(license_matches, "is_license_text")
):
return "text"
# Case where at least one of the matches is matched to a license `notice` rule.
elif is_license_case(license_matches, "is_license_notice"):
return "notice"
# Case where at least one of the matches is matched to a license `tag` rule.
elif is_license_case(license_matches, "is_license_tag"):
return "tag"
# Case where at least one of the matches is matched to a license `reference` rule.
elif is_license_case(license_matches, "is_license_reference"):
return "reference"
# Case where the matches are matched to a license `intro` rule.
elif is_license_case(license_matches, "is_license_intro"):
return "intro"
def get_license_text_issue_type(is_license_text, is_legal):
"""
Classifies the license detection issue into one of ISSUE_TYPES_BY_CLASSIFICATION,
where it is a license text.
"""
if is_legal:
if is_license_text:
return "text-legal-lic-files"
else:
return "text-lic-text-fragments"
else:
return "text-non-legal-lic-files"
def get_license_notice_issue_type(license_matches, issue_category):
"""
Classifies the license detection issue into one of ISSUE_TYPES_BY_CLASSIFICATION,
where it is a license notice.
"""
license_expression_connectors = ["AND", "OR", "WITH"]
match_rule_license_expressions = [
license_match.license_expression for license_match in license_matches
]
if issue_category == "false-positive":
return "notice-false-positive"
elif issue_category == "unknown-match":
return "notice-has-unknown-match"
elif all(
any(
license_expression_connector in license_expression
for license_expression_connector in license_expression_connectors
)
for license_expression in match_rule_license_expressions
):
return "notice-and-or-with-notice"
elif any(
"unknown" in license_expression
for license_expression in match_rule_license_expressions
):
return "notice-has-unknown-match"
else:
return "notice-single-key-notice"
def get_license_tag_issue_type(issue_category):
"""
Classifies the license detection issue into one of ISSUE_TYPES_BY_CLASSIFICATION,
where it is a license tag.
"""
if issue_category == "false-positive":
return "tag-false-positive"
else:
return "tag-tag-coverage"
def get_license_reference_issue_type(license_matches, issue_category):
"""
Classifies the license detection issue into one of ISSUE_TYPES_BY_CLASSIFICATION,
where it is a license reference.
"""
match_rule_identifiers = [
license_match.rule_identifier for license_match in license_matches
]
if issue_category == "false-positive":
return "reference-false-positive"
elif any("lead" in identifier for identifier in match_rule_identifiers) or any(
"unknown" in identifier for identifier in match_rule_identifiers
) or issue_category == "unknown-match":
return "reference-lead-in-or-unknown-refs"
else:
return "reference-low-coverage-refs"
def get_issue_type(
license_matches, is_license_text, is_legal, issue_category, issue_rule_type
):
"""
Classifies the license detection issue into one of ISSUE_TYPES_BY_CLASSIFICATION
"""
if issue_rule_type == "text":
return get_license_text_issue_type(is_license_text, is_legal)
elif issue_rule_type == "notice":
return get_license_notice_issue_type(license_matches, issue_category)
elif issue_rule_type == "tag":
return get_license_tag_issue_type(issue_category)
elif issue_rule_type == "reference":
return get_license_reference_issue_type(license_matches, issue_category)
elif issue_rule_type == "intro":
return "intro-unknown-match"
def get_issue_rule_type_using_bert(license_matches):
raise NotImplementedError
def determine_false_positive_case_using_bert(license_matches):
raise NotImplementedError
def merge_string_without_overlap(string1, string2):
"""
Merge two Strings that doesn't have any common substring.
"""
return string1 + "\n" + string2
def merge_string_with_overlap(string1, string2):
"""
Merge two Strings that has a common substring.
"""
idx = 0
while not string2.startswith(string1[idx:]):
idx += 1
return string1[:idx] + string2
def get_start_end_line(license_matches):
"""
Returns start and end line for a license detection issue, from the
license match(es).
"""
start_line = min([license_match.start_line for license_match in license_matches])
end_line = max([license_match.end_line for license_match in license_matches])
return start_line, end_line
def predict_license_expression(license_matches):
"""
Return the best-effort predicted license expression given a list of LicenseMatch
objects.
"""
unknown_expressions = ['unknown', 'warranty-disclaimer']
license_expressions = (
license_match.license_expression for license_match in license_matches
)
known_expressions = [
le for le in license_expressions if le not in unknown_expressions
]
if not known_expressions:
return "unknown"
license_expressions_counts = dict(Counter(known_expressions).most_common())
highest_count = list(license_expressions_counts.values())[0]
top_license_expressions = [
expression
for expression, count in license_expressions_counts.items()
if count == highest_count
]
if len(top_license_expressions) == 1:
return top_license_expressions[0]
top_license_matches = [
license_match
for license_match in license_matches
if license_match.license_expression in top_license_expressions
]
max_match_length = max([
license_match.matched_length
for license_match in top_license_matches
])
license_expression_prediction = next(
license_match.license_expression
for license_match in top_license_matches
if license_match.matched_length is max_match_length
)
return license_expression_prediction
def get_license_match_suggestion(license_matches, issue_category, issue_type):
"""
Suggest a license match rectifying the license detection issue.
:param license_matches:
List of LicenseMatch.
:param issue_category:
One of LicenseDetectionIssue.ISSUE_CATEGORIES.
:param issue_type:
One of LicenseDetectionIssue.ISSUE_TYPES_BY_CLASSIFICATION
:returns license_expression:
A complete license expression from all the licenses matches.
:returns matched_text:
A complete matched text from all the licenses matches.
"""
license_expression = None
matched_text = None
if issue_category != "correct-license-detection":
if len(license_matches) == 1:
[match] = license_matches
license_expression = match.license_expression
matched_text = match.matched_text
else:
if issue_type == "notice-and-or-with-notice":
match = license_matches[0]
license_expression = match.license_expression
matched_text = match.matched_text
else:
license_expression = predict_license_expression(license_matches)
matched_text = consolidate_matches(license_matches)
return license_expression, matched_text
def consolidate_matches(license_matches):
"""
Create a complete matched_text from a group of Matches, which are in the same
license detection issue, i.e. in the same file-region.
The license matches are incorrect matches and has fragments of a larger text,
but, may not contain the entire text even after consolidating.
"""
matched_text = None
string_end_line = None
is_first_group = True
for license_match in license_matches:
if is_first_group:
string_end_line = license_match.end_line
matched_text = license_match.matched_text
is_first_group = False
continue
else:
present_start_line = license_match.start_line
present_end_line = license_match.end_line
present_text = license_match.matched_text
# Case: Has a line-overlap
if string_end_line == present_start_line:
matched_text = merge_string_with_overlap(matched_text, present_text)
string_end_line = present_end_line
# Case: Boundary doesn't overlap but just beside
elif string_end_line < present_start_line:
matched_text = merge_string_without_overlap(matched_text, present_text)
string_end_line = present_end_line
# Case: Deep Overlaps (Of more than one lines)
elif string_end_line > present_start_line:
if string_end_line < present_end_line:
matched_text = merge_string_with_overlap(matched_text, present_text)
string_end_line = present_end_line
return matched_text
def analyze_region_for_license_scan_issues(license_matches, is_license_text, is_legal):
"""
On a group of license matches (grouped on the basis of location in file),
perform steps of analysis to determine if the license match is correct or if it has
any issues. In case of issues, divide the issues into groups of commonly occurring
license detection issues.
:param license_matches: list
List of LicenseMatch.
:param is_license_text: bool
:param is_legal: bool
:return issue_category: str
One of LicenseDetectionIssue.ISSUE_CATEGORIES.
:returns issue_type: str
One of LicenseDetectionIssue.ISSUE_TYPES_BY_CLASSIFICATION
"""
issue_category = get_analysis_for_region(license_matches)
issue_type = None
# If one of the matches in the file-region has issues, classify the type of issue
# into further types of issues
if issue_category != "correct-license-detection":
if not USE_LICENSE_CASE_BERT_MODEL:
issue_rule_type = get_issue_rule_type(
license_matches,
is_license_text,
is_legal,
)
else:
issue_rule_type = get_issue_rule_type_using_bert(license_matches)
issue_type = get_issue_type(
license_matches,
is_license_text,
is_legal,
issue_category,
issue_rule_type,
)
return issue_category, issue_type
def modify_analysis_confidence(license_detection_issue):
"""
Modify the analysis confidence to a more precise one from the default confidences
in LicenseDetectionIssue.ISSUE_TYPES_BY_CLASSIFICATION, by using more analysis
information.
:param license_detection_issue:
A LicenseDetectionIssue object.
"""
if (
license_detection_issue.issue_category == "extra-words"
or license_detection_issue.issue_category == "near-perfect-match-coverage"
):
license_detection_issue.issue_type.analysis_confidence = "high"
elif (
license_detection_issue.issue_category == "false-positive"
or license_detection_issue.issue_category == "unknown-match"
):
license_detection_issue.issue_type.analysis_confidence = "low"
def group_matches(license_matches, lines_threshold=LINES_THRESHOLD):
"""
Given a list of `matches` yield lists of grouped matches together where each
group is less than `lines_threshold` apart.
Each item in `matches` is a ScanCode matched license using the structure
that is found in the JSON scan results.
:param license_matches: list
List of LicenseMatch.
:param lines_threshold: int
The maximum space that can exist between two matches for them to be
considered in the same file-region.
:returns: list generator
A list of groups, where each group is a list of matches in the same file-region.
"""
group_of_license_matches = []
for license_match in license_matches:
if not group_of_license_matches:
group_of_license_matches.append(license_match)
continue
previous_match = group_of_license_matches[-1]
is_in_group = license_match.start_line <= previous_match.end_line + lines_threshold
if is_in_group:
group_of_license_matches.append(license_match)
continue
else:
yield group_of_license_matches
group_of_license_matches = [license_match]
yield group_of_license_matches
def analyze_matches(groups_of_license_matches, path, is_license_text, is_legal):
"""
Analyze all license detection issues in a file, for license detection issues.
:param all_groups_of_license_matches: list generator
A list of groups, where each group is a list of matches (in a file-region).
:param path: str
Path of the resource where the license issue exists
:param is_license_text: bool
:param is_legal: bool
:returns: list generator
A list of LicenseDetectionIssue objects one for each license detection
issue.
"""
for group_of_license_matches in groups_of_license_matches:
issue_category, issue_type = analyze_region_for_license_scan_issues(
license_matches=group_of_license_matches,
is_license_text=is_license_text,
is_legal=is_legal,
)
license_detection_issue = LicenseDetectionIssue.format_analysis_result(
issue_category, issue_type, group_of_license_matches, path
)
if license_detection_issue:
yield license_detection_issue
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/src/scancode_analyzer/license_analyzer.py
| 0.784154 | 0.361559 |
license_analyzer.py
|
pypi
|
import pandas as pd
from licensedcode import models
# Threshold of Words which is used in `Rule.compute_relevance` in `scancode.licensedcode.models.py`
THRESHOLD_COMPUTE_RELEVANCE = 18.0
# Different Rule Attribute Groups
boolean_rule_attributes = [
"is_license_reference", "is_license_text", "is_license_notice",
"is_license_tag", "is_license_intro", "only_known_words", "is_false_positive"
]
ignorables = [
'ignorable_copyrights', 'ignorable_holders', 'ignorable_authors',
'ignorable_emails', 'ignorable_urls'
]
other_optionals = ['referenced_filenames', 'notes']
class LicenseRulesInfo:
"""
Contains all Licenses and License Rules related information, loaded from scancode.
"""
def __init__(
self,
rules_folder=models.rules_data_dir,
licenses_folder=models.licenses_data_dir
):
self.rule_df = None
self.lic_df = None
self.load_scancode_rules(rules_folder)
self.load_scancode_licenses(licenses_folder)
self.modify_lic_rule_info()
def load_scancode_rules(self, rules_folder):
"""
Loads all scancode rules into a Dataframe.
"""
rules = list(models.load_rules(rules_folder))
rules_df = []
for rule in rules:
df = pd.DataFrame.from_dict(rule.to_dict(), orient='index').T
rule_text = rule.text()
df["rule_filename"] = rule.data_file.split("/")[-1][:-4]
df["text"] = rule_text
df["words_count"] = len(rule_text.split())
rules_df.append(df)
self.rule_df = pd.concat(rules_df)
def load_scancode_licenses(self, licenses_folder):
"""
Loads all scancode licenses into a Dataframe.
"""
licenses = models.load_licenses(licenses_folder)
licenses_df = []
for license in licenses.values():
df = pd.DataFrame.from_dict(license.to_dict(), orient='index').T
license_text = license.text
df["license_filename"] = license.data_file.split("/")[-1][:-4]
df["text"] = license_text
df["words_count"] = len(license_text.split())
licenses_df.append(df)
self.lic_df = pd.concat(licenses_df)
@staticmethod
def rules_compute_relevance(rule_df, threshold=THRESHOLD_COMPUTE_RELEVANCE):
"""
Compute all the Relevance Values of Rules where it isn't given explicitly.
:param rule_df: pd.DataFrame
DataFrame with all Rule Information.
:param threshold: float
The threshold value, above which rules have a relevance of 100
"""
rule_df.loc[rule_df["is_false_positive"] is True, "relevance"] = 100
rule_df.loc[rule_df["words_count"] >= threshold, "relevance"] = 100
relevance_of_one_word = round((1 / threshold) * 100, 2)
rule_df.loc[
rule_df["relevance"].isna(),
"relevance"
] = rule_df.loc[
rule_df["relevance"].isna(),
"words_count"
] * relevance_of_one_word
@staticmethod
def rules_compute_min_cov(rule_df):
"""
Compute all the Minimum Coverage Values of Rules where it isn't given explicitly.
:param rule_df: pd.DataFrame
DataFrame with all Rule Information.
"""
rule_df.loc[rule_df["minimum_coverage"].isna(), "minimum_coverage"] = 0
@staticmethod
def licences_compute_min_cov(lic_df):
"""
Compute all the Minimum Coverage Values of Licenses where it isn't given explicitly.
:param lic_df: pd.DataFrame
DataFrame with all License Information.
"""
lic_df.loc[lic_df["minimum_coverage"].isna(), "minimum_coverage"] = 0
def modify_lic_rule_info(self):
"""
Formats and Modifies Rule/License Information.
:param rule_df: pd.DataFrame
DataFrame with all Rules Information.
:param lic_df: pd.DataFrame
DataFrame with all License Information.
"""
# Convert NaN Values in Boolean Columns to False, making it a boolean column, not object
self.rule_df.fillna(
{x: False for x in boolean_rule_attributes},
inplace=True
)
self.rules_compute_relevance(self.rule_df)
self.rules_compute_min_cov(self.rule_df)
self.licences_compute_min_cov(self.lic_df)
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/src/scancode_analyzer/load_data.py
| 0.678647 | 0.271741 |
load_data.py
|
pypi
|
from collections import Counter
import attr
"""
Data Format and example output of analyzer summary, having unique
license detection issues and statistics.
codebase_level:
- license_detection_issues_summary: SummaryLicenseIssues
- unique_license_detection_issues: list of UniqueIssue
- issue_categoryentifier: 1
- files: list of FileRegions
- path: "path/to/occurrence"
- start_line: 1
- end_line: 2
- license_detection_issue: LicenseDetectionIssue
- statistics: StatisticsLicenseIssues
- total_files_with_license: 43
- total_files_with_license_detection_issues: 17
- total_unique_license_detection_issues: 3
- issue_category_counts:
- imperfect-match-coverage: 2
- unknown-match: 1
- issue_classification_id_counts:
- text-lic-text-fragments: 1
- notice-has-unknown-match: 1
- reference-low-coverage-refs: 1
- license_info_type_flags_counts:
- license_text: 1
- license_notice: 1
- license_reference: 1
- analysis_confidence_counts:
- high: 1
- medium: 2
- low: 0
"""
@attr.s
class SummaryLicenseIssues:
"""
Codebase level summary of License Detection Issues.
"""
statistics = attr.ib()
unique_license_detection_issues = attr.ib(factory=list)
def to_dict(self):
return attr.asdict(self)
@staticmethod
def summarize(license_issues, count_has_license, count_files_with_issues):
"""
Generate summary with Unique Issues and Statistics.
"""
unique_issues = UniqueIssue.get_unique_issues(
license_issues,
)
statistics=StatisticsLicenseIssues.generate_statistics(
license_issues=license_issues,
count_unique_issues=len(unique_issues),
count_has_license=count_has_license,
count_files_with_issues=count_files_with_issues,
)
return SummaryLicenseIssues(
unique_license_detection_issues=unique_issues,
statistics=statistics,
)
@attr.s
class StatisticsLicenseIssues:
"""
All statistics on License Detection Issues from the analysis.
"""
total_files_with_license = attr.ib(type=int)
total_files_with_license_detection_issues = attr.ib(type=int)
total_unique_license_detection_issues = attr.ib(type=int, default=0)
# Stats on analyzer.LicenseDetectionIssue.issue_category
issue_category_counts = attr.ib(factory=dict)
# Stats on analyzer.LicenseDetectionIssue.issue_type.classification_id
issue_classification_id_counts = attr.ib(factory=dict)
# Stats on analyzer.LicenseDetectionIssue.issue_type.analysis_confidence
analysis_confidence_counts = attr.ib(factory=dict)
# Stats on the 4 flags of analyzer.LicenseDetectionIssue.issue_type
# i.e. is_license['text','notice','tag','reference']
license_info_type_counts = attr.ib(factory=dict)
@staticmethod
def generate_statistics(
license_issues, count_unique_issues, count_has_license, count_files_with_issues
):
"""
Get all unique license detection issues for the scan
and their occurances, from all the issues.
:param license_issues: list of LicenseDetectionIssue
:param count_unique_issues: int
Number of unique license detection issues
:param count_has_license int:
Number of files having license information
:param count_files_with_issues: int
Number of files having license detection issues
:returns UniqueLicenseIssues: list of UniqueIssue
"""
issue_statistics = dict(Counter((
issue.issue_category for issue in license_issues
)))
issue_type_statistics = dict(Counter((
issue.issue_type.classification_id
for issue in license_issues
)))
flags_statistics = {
"license_text": sum((
issue.issue_type.is_license_text
for issue in license_issues
)),
"license_notice": sum((
issue.issue_type.is_license_notice
for issue in license_issues
)),
"license_tag": sum((
issue.issue_type.is_license_tag
for issue in license_issues
)),
"license_reference": sum((
issue.issue_type.is_license_reference
for issue in license_issues
)),
}
license_info_type_statistics = {
flag: count
for flag, count in flags_statistics.items()
if count
}
analysis_confidence_statistics = dict(Counter((
issue.issue_type.analysis_confidence for issue in license_issues
)))
return StatisticsLicenseIssues(
total_files_with_license=count_has_license,
total_files_with_license_detection_issues=count_files_with_issues,
total_unique_license_detection_issues=count_unique_issues,
issue_category_counts=issue_statistics,
issue_classification_id_counts=issue_type_statistics,
license_info_type_counts=license_info_type_statistics,
analysis_confidence_counts=analysis_confidence_statistics,
)
@attr.s
class UniqueIssue:
"""
An unique License Detection Issue.
"""
unique_identifier = attr.ib(type=int)
license_detection_issue = attr.ib()
files = attr.ib(factory=list)
@staticmethod
def get_formatted_unique_issue(
license_issue, files, unique_identifier
):
return UniqueIssue(
license_detection_issue=license_issue.to_dict(),
files=files,
unique_identifier = unique_identifier,
)
@staticmethod
def get_unique_issues(license_issues):
"""
Get all unique license detection issues for the scan
and their occurances, from all the issues.
:param license_issues: list of LicenseDetectionIssue
:returns UniqueLicenseIssues: list of UniqueIssue
"""
identifiers = get_identifiers(license_issues)
unique_issue_category_counts = dict(Counter(identifiers))
unique_license_issues = []
for issue_number, (unique_issue_identifier, counts) in enumerate(
unique_issue_category_counts.items(), start=1,
):
file_regions = (
issue.file_regions.pop()
for issue in license_issues
if unique_issue_identifier in [issue.identifier, issue.identifier_for_unknown_intro]
)
all_issues = (
issue
for issue in license_issues
if unique_issue_identifier in [issue.identifier, issue.identifier_for_unknown_intro]
)
unique_license_issues.append(
UniqueIssue.get_formatted_unique_issue(
files=list(file_regions),
license_issue=next(all_issues),
unique_identifier=issue_number,
)
)
return unique_license_issues
def get_identifiers(license_issues):
"""
Get identifiers for all license detection issues.
:param license_issues: list of LicenseDetectionIssue
:returns identifiers: list of tuples
"""
identifiers = (
issue.identifier if issue.issue_category != "unknown-match"
else issue.identifier_for_unknown_intro
for issue in license_issues
)
return identifiers
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/src/scancode_analyzer/summary.py
| 0.810966 | 0.254743 |
summary.py
|
pypi
|
Selecting Incorrect Scan Cases
==============================
The steps of analysing license matches in a file and flagging potential license detection issues
are:
1. Dividing Matches in file-regions - :ref:`location_regions_division`
2. Detecting License Detection Issues in file-regions - :ref:`analysis`
3. Grouping the issues into classes and subclasses of issues - :ref:`dividing_into_more_cases`
4. Getting rid of same issues across package - :ref:`ignoring_same_cases_in_package`
5. Resolving issues based on groups - :ref:`resolving_issues`
.. _location_regions_division:
Dividing Matches into Region Groups
-----------------------------------
All the matches detected in a file, are grouped into `file-regions <file_region>`_,
(i.e. one file would have multiple, or at least one file-region) and then the analysis is
performed separately on all these file-regions as these are to be handled independently
from each other.
These 3 attributes in the analysis results has information on which file-region the matches are in.
1. ``start_line`` - The line number where this current file region starts from, in the file.
2. ``end_line`` - The line number where this current file region ends at, in the file.
3. ``original_license`` - All the matches detected by scancode that are in this file-region.
.. _file_region:
File Region
^^^^^^^^^^^
A file-region is::
A location in a file having one or multiple group of license matches, overlapping each other or
located very closely, such that it seems to belong to one form of license declaration.
File -> file-region is a one to many relationship.
Why we need to divide matches in a file into file-regions:
1. A file could have multiple different license information/declarations in multiple regions, and
so issues in detecting one of these doesn't effect detection of the others.
2. If there are multiple matches in a region, they need to be analyzed as a whole, as even if most
matches have perfect ``score`` and ``match_coverage``, only one of them with a imperfect
``match_coverage`` would mean there is a issue with that whole file-region. For example one
license notice can be matched to a notice rule with imperfect scores, and several small
license reference rules.
3. In times of creating a Rule out of the issue/ dealing with it, we need the matches grouped by
file-regions.
File-Region Example
^^^^^^^^^^^^^^^^^^^
In example - `scancode-toolkit#1907 <https://github.com/nexB/scancode-toolkit/issues/1907#issuecomment-597773239>`_
- the first GPL detection is for this line range - {"start_line": 8, "end_line": 19}
- the second is for the free-unknown for this line range: {"start_line": 349, "end_line": 350}
Now, for a single file, we group all the detections by locations, to select the ones which
are correctly detected, and the wrong detections.
Here we’re using a threshold, which is after grouping all these detections by start and end line
numbers, if two of these groups are there spaces by at least N number of lines, they will be
treated as separate groups.
Scancode also uses a similar threshold while breaking down queries into slices based on the number
of lines between them, so as the use cases are exactly similar, so we are using the same threshold.
From ``scancode/src/licensedcode/query.py``, in ``__init__`` for ``Class Query``,
``line_threshold`` is set as 4.
“Break query in runs when there are at least `line_threshold` empty lines or junk-only lines.”
File-Region Grouping Algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Algorithm as for Grouping based on Location -
- Step 1: Start from the first match, and assign it into the first group.
- Step 2: Initialize boundaries to the start/end line of this match.
- Step 3: Go by every match executing these instructions :-
- If entirely inside the boundary, include in the current group.
- If partly inside the boundary, extend boundaries and include in the current group.
- If very close to boundary, i.e. less than a threshold, extend boundaries and include in the
current group.
- Else, if outside boundary, go to step 1 making this match as a new group.
- Repeat until there’s no matches left.
As there’s never too many detections in a file, and there’s almost always detections which have
almost all of the matched texts, and as the matches are sorted according to their start/end lines,
this is efficient enough, and passes through the list of matches once.
.. _analysis:
File-regions with Incorrect Scans
---------------------------------
The attribute ``issue_id`` in the analysis results has information on if the
file-region has any license detection issue in it, bases on coverage values, presence of extra words
or false positive tags.
.. note::
The 5 possible values of ``issue_id`` are:
1. ``imperfect-match-coverage``
2. ``near-perfect-match-coverage``
3. ``extra-words``
4. ``false-positive``
5. ``unknown-match``
If we do not have an issue, it is a correct license detection.
Scancode detects most licenses accurately, so our focus is only on the parts where the detection has
issues, and so primarily in the first step we separate this from the Correct Scans.
Initially from the `matcher` information we can say that
IF the license matcher is “1-hash” or “4-spdx-id” they are correct matches, all incorrect matches
lie in the other two matchers, i.e. “2-aho” and “3-seq”.
So in ``Step 1``::
So mark all license matches with matcher “1-hash” and “4-spdx-id” first, as none of them
are wrong detections, and also detections where all the matches have a perfect
``match_coverage``, i.e. 100.
These fall into the first category:
1. ``correct-license-detection``
Then in ``Step 2`` we come into “score” and “match_coverage” values.
There are multiple matches in a File, and the individual (for each match) scores are calculated as
``score = matched_coverage * rule_relevance``
So if the score is less, there’s two possibilities::
i. one is that the license information present itself is inadequate, but scancode detects that
correctly, here match_coverage is always 100.
ii. It doesn't match entirely, making the match_coverage less than 100.
So for now, we segregate incorrect matches as follows::
IN A FILE, among all the multiple matches per file, if even one of them has a match_coverage
value below a threshold, (say 100), it has a wrong detection potentially, and we flag all the
detected matches of that file, for further analysis and segregation.
There is also another case where ``score != matched_coverage * rule_relevance``, where there are
some extra words, i.e. the entire rule was matched, but there were some extra words which caused the
decrease in score.
So the 3 category of issues as classified in this step are:
2. ``imperfect-match-coverage``
3. ``near-perfect-match-coverage``
4. ``extra-words``
Also note that this order is important, as if any one of the matches has this case, the entire file
will be flagged as such.
And another case taking into account the false-positives, where the rule length would be
less than a threshold (i.e. say less than 4 words) and the start-line of the match should
be more than a threshold (i.e. say more than 1000) for it to be considered a false positive.
This is the ``Step 3`` and here a NLP sentence Classifier could be used to improve accuracy.
The issue class is called:
5. ``false-positives``
Even if all the matches has perfect `match_coverage`, if there are `unknown` license
matches there, there's likely a license detection issue. This issue is a:
6. ``unknown-match``
.. _dividing_into_more_cases:
Dividing the issues into more cases
-----------------------------------
These cases (group of matches in file-regions) are then divided into more types of issues in two
steps:
- Case of License Information (Text/Notice/Tag/References)
- Sub-cases for each of these 4 cases
Go to :ref:`lic_detection_issue_types` for detailed discussions and a comprehensive list of
all possible attribute values (i.e. all types of potential license detection issue) in results.
.. _ignoring_same_cases_in_package:
Ignoring Same Incorrect Scans, Package Wise
-------------------------------------------
So in Scancode, most of the code files have the same header license notice, and some of them, which
are derived from other packages, have other different license notices.
Now this practice is common across a lot of packages, as license notices/references/tags, or in
some cases even entire texts(I’ve not encountered examples of these?) being present in a lot of
files. Now naturally if one of these is not detected correctly by scancode license detection,
other exactly similar ones will also be not detected correctly.
We need not have all of these incorrect matches, we only need one of every unique case.
So in order to report only unique ones, we use a combination of “matched_rule_identifier”
and “match_coverage” to determine uniqueness of the matches. But we use this file-wise.
I.e. the policy is::
If multiple files have the same N number of matches, all these matches having same
“matched_rule_identifier” and “match_coverage” across these multiple files, we keep only
one file among them and discard the others.
For example, in `scancode-toolkit#1920 <https://github.com/nexB/scancode-toolkit/issues/1920>`_, socat-2.0.0 has
multiple (6) files with each file having the same 3 matched rules and match_coverage sets, i.e. -
- {"gpl-3.0-plus_with_tex-exception_4.RULE", 13.21}
- {gpl-3.0-plus_with_tex-exception_4.RULE”, 13.21}
- {gpl-2.0_756.RULE", 100.0}
So, we need to keep only one of these files, as the others have the same license detection errors.
.. note::
This is performed in the summary plugin, where all the unique license detection issues are
reported in the summary together, each with a list of their occurrences.
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/docs/source/how-analysis-is-performed/selecting-incorrect-unique.rst
| 0.909505 | 0.790571 |
selecting-incorrect-unique.rst
|
pypi
|
.. _lic_detection_issue_types:
License Detection Issue Types
=============================
There are 4 types of license information as segregated by scancode-toolkit, based on rule types.
- text
- notices
- tags
- references.
Note that this is the order of precedence, as in in a file-region if there's one type higher in this
order and other several types lower in the order, the issue in the file-region is likely in the
former type. For example if one of the matches is matched to a `notice` rule and other matches
to `reference` rules, then the file-region in question is likely a License Notice.
These carry different importance of the license information present, and thus in case of license
scan errors, there are fundamentally different types of problems which may require different
approaches to solve them.
These are primarily segregated by the type of the rule matched, having the largest
``matched_length`` value, as a preliminary approach. It could also be determined by
NLP Sentence Classifiers (BERT), fine-tuned on the Scancode Rules.
Result Attributes
-----------------
In the results of the analysis, the attributes having this information is ``issue_type``.
This further has many attributes:
1. ``classification_id`` and ``classification_description``
2. 4 boolean fields ``is_license_text``, ``is_license_notice``, ``is_license_tag``, and
``is_license_reference``.
3. ``is_suggested_matched_text_complete`` and ``analysis_confidence``
Here, the ``classification_id`` attribute is a id which corresponds to a issue type from
`all possible issue types <issue_types_table>`_, which the license detection issue is
classified into. The ``classification_description`` describes the `issue_type` to provide
more information and context about the analysis.
There are 4 main types of issues in `issue_type` and these correspond to the 4 boolean flags in
scancode rules:
- ``is_license_text`` - :ref:`case_lic_text`
- ``is_license_notice`` - :ref:`case_lic_notice`
- ``is_license_tag`` - :ref:`case_lic_tag`
- ``is_license_reference`` - :ref:`case_lic_ref`
Now the ``analysis_confidence`` is an approximate measure of how accurate the classification into
these `issue_types` are (and not a measure of whether it is an issue or not). It has 3 values:
1. `high`, 2. `medium` and 3. `low`
In many cases, and mostly in cases of a new license text, there are significant differences
between already seen licenses and this new license. So as a consequence, all the matched fragments
if stitched together, doesn't contain the whole text. This ``is_suggested_matched_text_complete``
attribute has this information.
.. note::
Now only issues with `is_license_text` as True has it's `is_suggested_matched_text_complete`
value as false.
.. _issue_types_table:
All Issue Types
---------------
.. list-table::
:widths: 5 15 15
:header-rows: 1
* - ``license``
- ``issue_type::classification_id``
- ``Description``
* - ``text``
- ``text-legal-lic-files``
- The matched text is present in a file whose name is a known legal filename.
* - ``text``
- ``text-non-legal-lic-files``
- The matched license text is present in a file whose name is not a known legal filename.
* - ``text``
- ``lic-text-fragments``
- Only parts of a larger license text are detected.
* - ``notice``
- ``and-or-with-notice``
- A notice with a complex license expression (i.e. exceptions, choices or combinations).
* - ``notice``
- ``single-key-notice``
- A notice with a single license.
* - ``notice``
- ``notice-has-unknown-match``
- License notices with unknown licenses detected.
* - ``notice``
- ``notice-false-positive``
- A piece of code/text is incorrectly detected as a license.
* - ``tag``
- ``tag-low-coverage``
- A part of a license tag is detected
* - ``tag``
- ``other-tag-structures``
- A new/common structure of tags are detected with scope for being handled differently.
* - ``tag``
- ``tag-false-positives``
- A piece of code/text is incorrectly detected as a license.
* - ``reference``
- ``lead-in-or-unknown-reference``
- Lead-ins to known license references are detected.
* - ``reference``
- ``low-coverage-reference``
- License references with a incomplete match.
* - ``reference``
- ``reference-to-local-file``
- Matched to an unknown rule as the license information is present in another file,
which is referred to in this matched piece of text.
* - ``reference``
- ``reference-false-positive``
- A piece of code/text is incorrectly detected as a license.
* - ``intro``
- ``intro-unknown-match``
- A piece of common introduction to a license text/notice/reference is detected.
.. _case_lic_text:
License Texts
-------------
All the `issue_types` with `is_license_text` as True.
License Text Files
^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``text-legal-lic-files``
- [More Than 90% License Words/Legal File]
Here the “is_license_text” plugin is used to detected if it’s a License File or Not, also “is_legal”
can be used for the detection, so an OR operation between these two cases.
So, if the full text is there in the “matched_text” we can go ahead and craft the rule from the
``matched_text``.
License Texts in Files
^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``text-non-legal-lic-files``
- [with less than 90% License Words]
In some cases, one of the “is_license_text” and “is_legal” tags, or even both could be False, and it
still could be classified as a License Text because
- the Rule it was partially matched was a license text rule
- the ``license-type`` sentence classifier designated it as a license text
Note: In this case how “is_license_text” and “is_legal” is calculated could be updated, based on
common mistakes.
Full text doesn’t exist in matched_text
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``text-lic-text-fragments``
Where the Full text doesn’t exist in matched_text and we have to go to/fetch the source file which
was scanned.
This is a common occurance in new unique license texts, which aren't fully present. Normally these
are detected by the ``3-seq`` matcher stage.
On scanning License Texts Present in scancode, by reindexing the license index to the state before
that particular text was added, we can see how the scan results look when entirely new license texts
are encountered.
So it seems as the license text is large, and varies a lot from already existing license texts, the
entire text doesn’t exist inside “matched_text”, so we have to go to the source file which was
scanned and add it from there.
For example these are the results for the “cern-ohl-w-2.0.LICENSE” file scanned by taking scancode
to a state where it wasn’t added.
Scan Result File has multiple partial matches
- " it applies as licensed under CERN-OHL-S or CERN-OHL-W"
- " licensed under CERN-OHL-S or CERN-OHL-W as appropriate."
- " licensed under a licence approved by the Free Software"
- " interfaced, which remain licensed under their own applicable"
- " direct, indirect, special, incidental, consequential, exemplary,\n
punitive or other damages of any character including, without\n
limitation, procurement of substitute goods or services, loss of\n
use, data or profits, or business interruption, however caused\n
and on any theory of contract, warranty, tort (including\n
negligence), product liability or otherwise, arising in any way\n
in relation to the Covered Source, modified Covered Source\n
and/or the Making or Conveyance of a Product, even if advised of\n
the possibility of such damages, and You shall hold the"
- " 7.1 Subject to the terms and conditions of this Licence, each"
- " You may treat Covered Source licensed under CERN-OHL-W as"
- " licensed under CERN-OHL-S if and only if all Available"
Clearly the actual license has a lot more text, which we can only get by going to the source.
.. _case_lic_notice:
License Notices
---------------
All `issue_types` with their `is_license_notice` value as True.
Exceptions, Rules with Keys having AND/OR
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``notice-and-or-with-notice``
Where there are multiple "notice" license detections, not of the same license name, in a single
file. These are often:
- dual licenses
- exceptions
These have multiple license detections and some times new combinations are detected, and has to be
added to the Rules.
Single key notices
^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``notice-single-key-notice``
This is the general case of License Notice cases, so if it's a license notice case and doesn't fall
into the other license notice cases detailed below, then it belongs in this category.
These are often detected as License Notices are often unique in projects, and for these rules can be
crafted with fairly high confidence as almost always the entire text is present in "matched_text".
.. _case_lic_tag:
License Tags
------------
All `issue_types` with their `is_license_tag` value as True.
Wrong License Tag Detections
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``tag-tag-coverage``
Among all “is_license_tag” = True cases, if match_coverage is less than 100, then it is a wrong
license detection, and as tags are small and matched_text almost always contains the whole tag, a
Rule can be created from these class of Problems.
This is the general case of License Tag cases, so if it's a license tag case and doesn't fall into
the other license tag cases detailed below, then it belongs in this category.
Other common Structures of Tags
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``tag-other-tag-structures``
There exists specific Tags, for group of projects, and these are mostly found in source code files,
in the code itself.
Like for example::
<small>License: <a href="http://www.fsf.org/licensing/licenses/fdl.html">GNU Free Documentation License (FDL)</a></small>
Or ``MODULE_LICENSE`` present in linux kernel source code.
We can cluster the data according to occurrences of same types of structures, and attributes used to
cluster/separate could be:
- Programming Language
- Type of Files?
Related Issue - https://github.com/nexB/scancode-toolkit/issues/707
Finding False Positives from License Tags Detections
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``tag-false-positives``.
There also exists ``notice-false-positives`` and ``reference-false-positives``, similarly.
In these cases, value of ``issue_id`` :- ``false-positives``
Now, the “is_license_tag” is mostly true for these, but the “match_coverage” is always 100
in these cases. These are almost always wrongly detected by some handful of rules which has only the
words gpl/lgpl or similar ones. So we further narrow our search down to only 1-3 word rules and
and an additional criteria being if the license match occurs at line number more than a certain
value, say 1000 or more.
But this also includes a lot of correct detections, which are correctly detected.
This classifying of “False Positives” from “Correct Tag Detection” is solely based on the
matched text, and should be solved by a BERT based sentence classifier. The binary classification
would be between false-positives and license-tags.
The data needed to train that model, which we can get from two places:-
1. The already existing scancode license rules, has a lot of examples of False Positives and
Correct License Tags
2. More training data
We could make use of the classifier confidence scores to only look at ambigous cases only.
.. note::
In some cases some more lines above and below are needed to be added to these false_positive
rules, as the ``matched_text`` can be too general for a false positive rule. This could require
manual work.
.. _case_lic_ref:
License References
------------------
All the `issue_types` with `is_license_reference` as True.
Those with low match coverages
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``reference-low-coverage-refs``
This is the most common type of license detection errors, as there exist a lot of
license references, and they can be added. These are also highly fixable problems, as almost always
the whole license reference is captured in ``matched_text``
We should separate these location wise, and add as new rules without any manual oversight.
This is the general case of License Reference cases, so if it's a license reference case and doesn't
fall into the other license reference cases detailed below, then it belongs in this category.
unknown file license references
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``reference-to-local-file``
In many cases the license that is referred to is in another file, and only the filename is given,
and not the license name. Example - "see license in file LICENSE.txt"
In these cases if there are more context/specific wording add these as new unknown rules.
So we separate these based on their matched_rules, i.e. if these are matched to an “unknown” or
similar kinds of non-explicitly named rules.
Other wise discard, as this is a issue to be handled separately, by implementing a system in
scancode where these links are followed and their license added.
Introduction to a License Notice
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
Value of ``issue_type:classification_id`` :- ``reference-lead-in-or-unknown-refs``
There are cases where the RULE name begins with ``lead-in_unknown_``, i.e. these are known lead-ins
to licenses, so even if the exact license isn't detected, it can be reported that there is a
license reference here.
Here we could add to the Scancode Rules, the license reference, or as in the example case below,
craft a new rule by joining the two existing ones
Example case:-
``Dual licensed under`` is ``lead-in_unknown_30.RULE``
say there is another rule: ``MIT and GPL``
and the text we scan is : ``Dual licensed under MIT and GPL``
To Note: If they appear quite frequently, it is okay to craft a new rule. Because we cannot just add
all combinations of lead-ins and license names.
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/docs/source/how-analysis-is-performed/cases-incorrect-scans.rst
| 0.911239 | 0.672905 |
cases-incorrect-scans.rst
|
pypi
|
import numpy as np
import pandas as pd
from results_analyze.rules_analyze import LicenseRulesInfo
class ResultsDataFrameFile:
def __init__(self):
self.lic_rule_info = LicenseRulesInfo(has_loaded=False)
# Which columns to drop from a File Level Dataframe.
self.drop_columns_list_file_lev = ['type', 'name', 'base_name', 'extension', 'date', 'md5',
'license_expressions', 'holders', 'copyrights', 'authors', 'packages',
'emails', 'urls', 'files_count', 'dirs_count', 'size_count', 'scan_errors']
# Which columns to drop from a License Level Dataframe.
self.drop_columns_list_lic_lev = ['name', 'short_name', 'owner', 'homepage_url', 'text_url', 'reference_url',
'spdx_license_key', 'spdx_url', 'license_expression', 'matched_rule',
'licenses']
# String to Integer Mappings for Compression
self.category_dict = {'Copyleft Limited': 5, 'Copyleft': 6, 'Proprietary Free': 7,
'Permissive': 1, 'Public Domain': 2, 'Free Restricted': 3, 'Source-available': 4,
'Commercial': 8, 'Unstated License': 0, 'Patent License': 9}
self.matcher_dict = {'1-hash': 1, '2-aho': 2, '3-seq': 3, '4-spdx-id': 4}
@staticmethod
def dict_to_rows_matched_rule_dataframes_apply(dataframe):
"""
Makes Dicts keys inside dict 'matched_rule' -> Columns of License level DataFrame.
:param dataframe: pd.DataFrame
"""
new_df = pd.DataFrame(list(dataframe['matched_rule']))
# Merge By Index, which basically Appends Column-Wise
dataframe = dataframe.join(new_df)
return dataframe
def modify_lic_level_dataframe(self, dataframe_lic):
"""
Modifies License level DataFrame, from 'matched_rule' dicts, bring information to columns.
Maps Rule Names and other strings to integer values to compress.
:param dataframe_lic: pd.DataFrame
:return dataframe_lic: pd.DataFrame
"""
# From dict 'matched_rule' expand keys to DataFrame Columns
dataframe_lic_rule = self.dict_to_rows_matched_rule_dataframes_apply(dataframe_lic)
# Drops Unnecessary Columns
dataframe_lic_rule.drop(columns=self.drop_columns_list_lic_lev, inplace=True)
return dataframe_lic_rule
def compress_lic_level_df(self, dataframe_lic):
"""
The following are converted from Dictionary Mappings loaded with constructor (Short dicts)
- "category" (Category of License, i.e. Permissive, Copyleft)
- "matcher" (type of matcher used i.e. 2-aho)
The following are converted from Dictionary Mappings loaded from LicenseRulesInfo (much longer dicts)
- "key" - License Key That is Detected (like - "mit")
- "identifier" - License or License Rule that is used to detect the license (i.e. "mit_456.RULE"/"mit.LICENSE")
:param dataframe_lic: pd.DataFrame
License Level DataFrame
"""
dataframe_lic["category"] = dataframe_lic["category"].map(self.category_dict).fillna(0).astype(np.uint8)
dataframe_lic["matcher"] = dataframe_lic["matcher"].map(self.matcher_dict).fillna(0).astype(np.uint8)
dataframe_lic["key"] = dataframe_lic["key"].map(self.lic_rule_info.key_dict).fillna(0).astype(np.uint16)
dataframe_lic["identifier"] = dataframe_lic["identifier"].map(
self.lic_rule_info.identifier_dict).fillna(0).astype(np.uint16)
def create_lic_level_dataframe(self, file_level_dataframe):
"""
Takes a File Level DataFrame, creates license level dataframes, modifies and cleans them up and
appends columns to file level dataframes. Here, already existing file level info is also present at each
license level rows.
:param file_level_dataframe: pd.DataFrame
:returns merged_df: pd.DataFrame
"""
# For each file, add license level dict-keys to new columns, and multiple licenses per file into new rows
# Introduces new column 'level_1'(renamed to 'lic_det_num'), which is the primary key for
# each license detection inside one file.
lic_level_dataframe = file_level_dataframe.groupby('sha1').licenses.apply(
lambda x: pd.DataFrame(x.values[0])).reset_index()
lic_level_dataframe.rename(columns={'level_1': 'lic_det_num'}, inplace=True)
# Modifies license level information
lic_level_dataframe = self.modify_lic_level_dataframe(lic_level_dataframe)
# makes sha1 column as the file level Index [Primary Key].
lic_level_dataframe.set_index('sha1', inplace=True)
# Remove "licenses" column, as the data from it is already added
file_level_dataframe.drop(columns=["licenses"], inplace=True)
merged_df = file_level_dataframe.join(lic_level_dataframe, lsuffix='_file', rsuffix='_lic')
merged_df.reset_index(inplace=True)
self.compress_lic_level_df(merged_df)
return merged_df
def modify_file_level_dataframe(self, dataframe_files):
"""
Takes a File Level DataFrame, drops unnecessary columns, drops all directory rows, drops same files,
drop files with no license detections, and makes sha1 column as the file level Index [Primary Key].
:param dataframe_files: pd.DataFrame
File Level DataFrame
:returns has_data: bool
If A File Level DataFrame is non-empty
"""
# Drops Unnecessary Columns
dataframe_files.drop(columns=self.drop_columns_list_file_lev, inplace=True)
# Drops all rows with file_type as directories, as they have `NaN` as their `sha1` values
dataframe_files.dropna(subset=['sha1'], inplace=True)
# Add a column number of license detections per file, and drop files with no license detections
dataframe_files['license_detections_no'] = dataframe_files.licenses.apply(lambda x: np.shape(x)[0])
dataframe_files.drop(dataframe_files[~ (dataframe_files['license_detections_no'] > 0)].index, inplace=True)
if dataframe_files.shape[0] == 0:
has_data = False
return has_data
else:
has_data = True
# Drops files that have the same sha1 hash, i.e. essentially similar files
dataframe_files.drop_duplicates(subset='sha1', keep="last", inplace=True)
# Makes SHA1 column the index (Slows down calculations)
dataframe_files.set_index('sha1', inplace=True)
return has_data
def create_file_level_dataframe(self, package_files_list):
"""
Creates a File and License Level DataFrame
:param package_files_list: list of file level dicts
:returns has_data: bool
If A File Level DataFrame is non-empty
:returns file_and_lic_level_dataframe: pd.DataFrame
Has File and License level information organized via pd.MultiIndex.
"""
# Initialize the file package level list into a DataFrame (Row - Files, Columns - Dict keys inside Files)
file_level_dataframe = pd.DataFrame(package_files_list)
# Clean Up and Modify the File Level DataFrame
has_data = self.modify_file_level_dataframe(file_level_dataframe)
# Checks if file_level_dataframe is Empty (i.e. if none of the files has any license information)
# Exits if Yes with empty DataFrame which won't be added
if not has_data:
return has_data, file_level_dataframe
# From column 'licenses', which is a list of dicts, create License Level DataFrames
file_and_lic_level_dataframe = self.create_lic_level_dataframe(file_level_dataframe)
# Sets 'sha1' and 'lic_det_num' columns as the Indexes (Primary Key Tuple)
file_and_lic_level_dataframe.set_index(['sha1', 'lic_det_num'], inplace=True)
return has_data, file_and_lic_level_dataframe
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/etc/load_scan_into_dataframe/load_results_file.py
| 0.833765 | 0.420064 |
load_results_file.py
|
pypi
|
import os
import gzip
import json
import pandas as pd
import numpy as np
from results_analyze.postgres import PostgresFetch
from results_analyze.load_results_file import ResultsDataFrameFile
from results_analyze.df_file_io import DataFrameFileIO
# How many rows of Database to Fetch at once
# ToDo: Calculation Function based on memory usage stats and RAM/SWAP Available
NUM_ROWS_TO_FETCH = 20
# 'table' (A bit slower, On-Disk Search/Query Enabled) or 'fixed' (Fast, No On-Disk Search/Query)
HDF5_STORE_FORMAT = 'table'
class ResultsDataFramePackage:
def __init__(self, has_database=True):
"""
Constructor for ResultsDataFramePackage, initialized PostgresFetch and ResultsDataFrameFile objects,
and data paths and filenames used.
"""
if has_database:
self.postgres = PostgresFetch()
self.results_file = ResultsDataFrameFile()
self.df_io = DataFrameFileIO()
def append_metadata_dataframe(self, metadata_dataframe):
"""
Stores data from the a Pandas Dataframe, containing metadata to hdf5. Creates file if file doesn't exist.
:param metadata_dataframe : pd.Dataframe
The metadata DataFrame which has to be appended
"""
if not os.path.exists(self.df_io.hdf_dir):
os.makedirs(self.df_io.hdf_dir)
file_path = os.path.join(self.df_io.get_hdf5_file_path(self.df_io.hdf_dir, self.df_io.metadata_filename))
if not os.path.isfile(self.df_io.get_hdf5_file_path(self.df_io.hdf_dir, filename=self.df_io.metadata_filename)):
self.df_io.store_dataframe_to_hdf5(metadata_dataframe, file_path, df_key='metadata',
h5_format='Table', is_append=False)
else:
self.df_io.store_dataframe_to_hdf5(metadata_dataframe, file_path, df_key='metadata',
h5_format='Table', is_append=True)
@staticmethod
def decompress_dataframe(compressed_dataframe):
"""
This function is applied to one column of a Dataframe containing memoryview objects, at once,
using the DataFrame.apply() method, to perform vectorized decompression. Returns a Pandas Series object
each row having the corresponding JSON dict.
:param compressed_dataframe : pd.Series
One column of a DataFrame, containing Compressed memoryview objects.
:returns decompressed_dataframe : pd.Series
One column of a DataFrame, containing JSON dicts of Scan Results.
"""
string_json = gzip.decompress(compressed_dataframe).decode('utf-8')
decompressed_dataframe = json.loads(string_json)
return decompressed_dataframe
def convert_records_to_json(self, num_rows_to_fetch=NUM_ROWS_TO_FETCH):
"""
Fetch scan_results from Postgres Database, Load into Pandas Dataframes, and Decompress into JSON dicts.
:param num_rows_to_fetch : int
Number of Rows to Fetch from the Postgres Database, which is essentially the number of packages scanned.
:returns dataframe_memoryview : pd.DataFrame
DataFrame containing two Columns 'path' and 'json_content'.
"""
# Fetch A specified rows of Data From postgres Database, and load into a DataFrame
data_memoryview = self.postgres.fetch_data(num_rows_to_fetch)
dataframe_memoryview = pd.DataFrame(data_memoryview, columns=['path', 'memoryview'])
# Decompress entire `memoryview` column, add decompressed JSON dicts at `json_content`, then drop former.
dataframe_memoryview['json_content'] = dataframe_memoryview.memoryview.apply(self.decompress_dataframe)
dataframe_memoryview.drop(columns=['memoryview'], inplace=True)
return dataframe_memoryview
@staticmethod
def dict_to_rows_in_dataframes_l2(dataframe, key_1, key_2):
"""
This function is applied to one column of a Dataframe containing json dicts, at once,
using the DataFrame.apply() method, to perform vectorized data retrieval.
:param dataframe : pd.Series
One column of a DataFrame, containing json dicts.
:param key_1 : string
:param key_2 : string
:returns row_data : pd.Series
One column of a DataFrame, containing values/dicts/lists that were inside those JSON dicts.
"""
row_data = dataframe[key_1][key_2]
return row_data
@staticmethod
def dict_to_rows_in_dataframes_l3(dataframe, key_1, key_2, key_3):
"""
This function is applied to one column of a Dataframe containing json dicts, at once,
using the DataFrame.apply() method, to perform vectorized data retrieval.
:param dataframe : pd.Series
One column of a DataFrame, containing json dicts.
:param key_1 : string
:param key_2 : string
:param key_3: string
:returns row_data : pd.Series
One column of a DataFrame, containing values/dicts/lists that were inside those JSON dicts.
"""
row_data = dataframe[key_1][key_2][0][key_3]
return row_data
def dict_to_rows_in_dataframes_apply(self, dataframe, key_1, key_2):
"""
This function is applied to one column of a Dataframe containing json dicts, at once, to perform
vectorized data retrieval. Then convert the column of dicts to a list of dicts, to create dataframes from them.
The DataFrames columns are those dict keys.
:param dataframe : pd.DataFrame
DataFrame, containing json dicts in a column.
:param key_1 : string
:param key_2 : string
:returns dataframe : pd.DataFrame
DataFrame, containing new columns for each dict keys, from the dict inside the JSON dict.
"""
dataframe_dicts = dataframe.json_content.apply(self.dict_to_rows_in_dataframes_l2, args=(key_1, key_2))
new_df = pd.DataFrame(list(dataframe_dicts))
# Merge By Index, which basically Appends Column-Wise
dataframe = dataframe.join(new_df)
return dataframe
def value_to_rows_in_dataframes_apply(self, dataframe, key_1, key_2, name_value, key_3=None):
"""
This function is applied to one column of a Dataframe containing json dicts, at once, to perform
vectorized data retrieval. Then convert this row of values/lists to dataframes.
The DataFrames column name is the `name_value`.
:param dataframe : pd.DataFrame
One column of a DataFrame, containing json dicts.
:param key_1 : string
:param key_2 : string
:param key_3 : string
:param name_value : string
:return dataframe : pd.DataFrame
DataFrame, containing a new column for the value/list, from inside the JSON dict.
"""
if key_3 is None:
dataframe_dicts = dataframe.json_content.apply(self.dict_to_rows_in_dataframes_l2, args=(key_1, key_2))
else:
dataframe_dicts = dataframe.json_content.apply(self.dict_to_rows_in_dataframes_l3,
args=(key_1, key_2, key_3))
new_df = pd.DataFrame({name_value: dataframe_dicts})
# Merge By Index, which basically Appends Column-Wise
dataframe = dataframe.join(new_df)
return dataframe
@staticmethod
def convert_string_to_datetime(dataframe, old_col, new_col):
"""
This function takes a column of string datetime, and converts it into Pandas DatetimeIndex objects.
:param dataframe : pd.DatFrame
:param old_col : string : Name of Old Column
:param new_col : string : Name of New Column
"""
# Add Pandas DateTime Column
dataframe[new_col] = pd.to_datetime(dataframe[old_col].tolist(), format='%Y-%m-%d')
# Drop String DateTime Column
dataframe.drop(columns=[old_col], inplace=True)
def save_schema_num(self, schema_series):
"""
This function takes a Series containing schema counts, and appends it to schema DataFrame and saves on disk.
:param schema_series : pd.Series
"""
schema_df = pd.DataFrame(schema_series)
file_path = os.path.join(self.df_io.get_hdf5_file_path(self.df_io.hdf_dir, self.df_io.metadata_filename))
self.df_io.store_dataframe_to_hdf5(schema_df, file_path, df_key='schema', h5_format='table', is_append=True)
def assert_dataframe_schema(self, path_json_dataframe):
"""
This function takes a DataFrame containing columns 'path' and 'json_content', extracts info from path.
uses schema info to only keep schemaVersion 3.2.2, and saves the schema count which are deleted.
:param path_json_dataframe : pd.DatFrame
"""
# Splits the contents of 'path' column and adds another column 'list_split' containing lists
path_json_dataframe['list_split'] = path_json_dataframe['path'].str.split(pat="/")
# Convert these lists (each having 9 values) into DataFrame Columns named 0-8
split_df = pd.DataFrame.from_dict(
dict(zip(path_json_dataframe['list_split'].index, path_json_dataframe['list_split'].values))).T
# Give the path-split columns appropriate names
split_df.columns = pd.Index(['pkg_source_1', 'pkg_source_2', 'pkg_owner', 'pkg_name', 'revision',
'pkg_version', 'tool', 'scancode', 'schema_ver'])
# Save the Schema Version Counts
# self.save_schema_num(split_df.groupby(split_df['schema_ver'])['revision'].count())
# Merge these split path DataFrame with the Main DataFrame (appends columns)
merged_df = path_json_dataframe.join(split_df)
# Only keep Scancode scans of schemaVersion 3.2.2
merged_df.drop(merged_df[~ (merged_df['schema_ver'] == "3.2.2.json")].index, inplace=True)
# Columns 'revision', 'tool', 'scancode' has same entries, and info from "path", "list_split" is extracted
# Delete these unnecessary columns
merged_df.drop(columns=["path", "list_split", 'revision', 'tool', 'scancode', 'schema_ver'], inplace=True)
# Replace "-" entries in column "pkg_owner" with np.nan
merged_df.loc[merged_df["pkg_owner"] == '-', "pkg_owner"] = np.nan
return merged_df
def modify_package_level_dataframe(self, metadata_dataframe):
"""
This function is applied to one column of a Dataframe containing json dicts, at once, to perform
vectorized data retrieval. Then convert this row of values/lists to dataframes.
The DataFrames column name is the `name_value`.
:param metadata_dataframe : pd.DataFrame
:returns files_dataframe : pd.DataFrame
DataFrame, containing a two columns, which has the path_string in one, and has a list of dicts in each row
of the other column, which is list of file-level dicts.
:returns metadata_dataframe : pd.DataFrame
DataFrame, containing a new column for the value/list, from inside the JSON dict.
"""
metadata_dataframe = self.dict_to_rows_in_dataframes_apply(metadata_dataframe, key_1='content',
key_2='license_clarity_score')
metadata_dataframe = self.value_to_rows_in_dataframes_apply(metadata_dataframe, key_1='content',
key_2='headers', key_3='start_timestamp',
name_value='start_timestamp')
metadata_dataframe = self.value_to_rows_in_dataframes_apply(metadata_dataframe, key_1='content', key_2='files',
name_value='Files')
metadata_dataframe.drop(columns=['json_content'], inplace=True)
# Convert TimeProcess to TimeIndex
self.convert_string_to_datetime(metadata_dataframe, old_col='start_timestamp', new_col='TimeIndex')
files_dataframe = metadata_dataframe[['TimeIndex', 'Files']].copy(deep=True)
metadata_dataframe.drop(columns=['Files'], inplace=True)
return files_dataframe, metadata_dataframe
def compress_pkg_dataframe(self, main_df):
"""
Compressing Package Level DataFrame by changing DataTypes of some columns,
getting rid of unnecessary precision.
:param main_df: pd.DataFrame
Package Level DataFrame containing Scan Data.
"""
main_df["rule_relevance"] = main_df["rule_relevance"].astype(np.uint8)
main_df["rule_length"] = main_df["rule_length"].astype(np.uint16)
main_df["matched_length"] = main_df["matched_length"].astype(np.uint16)
main_df["license_detections_no"] = main_df["license_detections_no"].astype(np.uint16)
main_df["start_line"] = main_df["start_line"].astype(np.uint32)
main_df["end_line"] = main_df["end_line"].astype(np.uint32)
main_df["size"] = main_df["size"].astype(np.uint32)
# ToDo: Compress `file_type`, `mime_type`, String->Int Mapping
prog_lan_dict = self.df_io.get_prog_lang_dict()
main_df["programming_language"] = main_df["programming_language"].map(prog_lan_dict).fillna(0).astype(np.uint8)
def create_package_level_dataframe(self, json_filename=None, path_json_dataframe=None, load_df=True):
"""
Creates a Package Level DataFrame, with File/License Information Levels.
:param json_filename : String
Optional Parameter, if Passed, Takes input from a JSON File instead of a Postgres Database
:param path_json_dataframe : String
:param load_df : bool
:returns main_dataframe : df.DataFrame object
Main Storage DataFrame
Has Project, File and License level information organized via pd.MultiIndex.
"""
# Loads Dataframes
if load_df:
if json_filename:
path_json_dataframe = self.df_io.mock_db_data_from_json(json_filename)
else:
path_json_dataframe = self.convert_records_to_json()
# Asserts if Scancode SchemaVersion is desired value, from path
path_json_dataframe = self.assert_dataframe_schema(path_json_dataframe)
# Converts information multiple levels inside dicts into columns
# Package Level Data, TimeStamp, 'license_clarity_score' values,'files' list -> `New Columns`.
files_dataframe, metadata_dataframe = self.modify_package_level_dataframe(path_json_dataframe)
# Append metadata level information to a MetaData File
# self.append_metadata_dataframe(metadata_dataframe)
# ToDo: Parallelize the `results_file.create_file_level_dataframe` func call
# Iterate through all rows, (i.e. package scans), and calls file level function for each
# Appends the File and License Level DataFrame returned to a List.
file_level_dataframes_list = []
drop_files_index_list = []
for package_scan_result in files_dataframe.itertuples():
has_data, file_level_dataframe = self.results_file.create_file_level_dataframe(package_scan_result[2])
if has_data:
file_level_dataframes_list.append(file_level_dataframe)
else:
drop_files_index_list.append(package_scan_result[0])
# Drops the Files which has no License Information
files_dataframe.drop(drop_files_index_list, inplace=True)
# Creates File level keys, which are used to create package level keys in the MultiIndex
list_file_level_keys = list(files_dataframe['TimeIndex'])
# Concatenate File Level Dataframes from the list, and their corresponding keys
# into One Package Level Dataframe, using MultiIndex. Rename Primary Key column names.
main_dataframe = pd.concat(file_level_dataframes_list,
keys=list_file_level_keys)
main_dataframe.index.names = ['pkg_scan_time', 'file_sha1', 'lic_det_num']
# Compress Package Level DataFrame
self.compress_pkg_dataframe(main_dataframe)
return main_dataframe
|
/scancode-analyzer-21.4.7.tar.gz/scancode-analyzer-21.4.7/etc/load_scan_into_dataframe/load_results_package.py
| 0.73173 | 0.470007 |
load_results_package.py
|
pypi
|
"""
Monkeypatch Pool iterators so that Ctrl-C interrupts everything properly
derived from https://gist.github.com/aljungberg/626518
Copyright (c) Alexander Ljungberg. All rights reserved.
Modifications Copyright (c) nexB Inc. and others. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from multiprocessing import pool
from multiprocessing import TimeoutError
class ScanCodeTimeoutError(Exception):
pass
def wrapped(func):
"""
Ensure that we have a default timeout in all cases.
This is to work around some subtle Python bugs in multiprocessing
- https://bugs.python.org/issue8296
- https://bugs.python.org/issue9205
- https://bugs.python.org/issue22393
- https://bugs.python.org/issue38084
- """
# ensure that we do not double wrap
if func.__name__ != 'wrap':
def wrap(self, timeout=None):
try:
result = func(self, timeout=timeout or 3600)
except TimeoutError as te:
raise ScanCodeTimeoutError() from te
return result
return wrap
else:
return func
pool.IMapIterator.next = wrapped(pool.IMapIterator.next)
pool.IMapIterator.__next__ = pool.IMapIterator.next
pool.IMapUnorderedIterator.next = wrapped(pool.IMapUnorderedIterator.next)
pool.IMapUnorderedIterator.__next__ = pool.IMapUnorderedIterator.next
def get_pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
return pool.Pool(processes, initializer, initargs, maxtasksperchild)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/scancode/pool.py
| 0.710829 | 0.206854 |
pool.py
|
pypi
|
from itertools import islice
from os.path import getsize
import logging
import os
import sys
from commoncode.filetype import get_last_modified_date
from commoncode.hash import multi_checksums
from scancode import ScancodeError
from typecode.contenttype import get_type
TRACE = os.environ.get('SCANCODE_DEBUG_API', False)
def logger_debug(*args):
pass
logger = logging.getLogger(__name__)
if TRACE:
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(
' '.join(isinstance(a, str) and a or repr(a) for a in args)
)
"""
Main scanning functions.
Each scanner is a function that accepts a location and returns a sequence of
mappings as results.
Note: this API is unstable and still evolving.
"""
def get_copyrights(
location,
deadline=sys.maxsize,
**kwargs,
):
"""
Return a mapping with a single 'copyrights' key with a value that is a list
of mappings for copyright detected in the file at `location`.
"""
from cluecode.copyrights import detect_copyrights
from cluecode.copyrights import Detection
detections = detect_copyrights(
location,
include_copyrights=True,
include_holders=True,
include_authors=True,
include_copyright_years=True,
include_copyright_allrights=False,
deadline=deadline,
)
copyrights, holders, authors = Detection.split(detections, to_dict=True)
results = dict([
('copyrights', copyrights),
('holders', holders),
('authors', authors),
])
# TODO: do something if we missed the deadline
return results
def get_emails(
location,
threshold=50,
test_slow_mode=False,
test_error_mode=False,
**kwargs,
):
"""
Return a mapping with a single 'emails' key with a value that is a list of
mappings for emails detected in the file at `location`.
Return only up to `threshold` values. Return all values if `threshold` is 0.
If test_mode is True, the scan will be slow for testing purpose and pause
for one second.
"""
if test_error_mode:
raise ScancodeError('Triggered email failure')
if test_slow_mode:
import time
time.sleep(1)
from cluecode.finder import find_emails
results = []
found_emails = ((em, ln) for (em, ln) in find_emails(location) if em)
if threshold:
found_emails = islice(found_emails, threshold)
for email, line_num in found_emails:
result = {}
results.append(result)
result['email'] = email
result['start_line'] = line_num
result['end_line'] = line_num
return dict(emails=results)
def get_urls(location, threshold=50, **kwargs):
"""
Return a mapping with a single 'urls' key with a value that is a list of
mappings for urls detected in the file at `location`.
Return only up to `threshold` values. Return all values if `threshold` is 0.
"""
from cluecode.finder import find_urls
results = []
found_urls = ((u, ln) for (u, ln) in find_urls(location) if u)
if threshold:
found_urls = islice(found_urls, threshold)
for urls, line_num in found_urls:
result = {}
results.append(result)
result['url'] = urls
result['start_line'] = line_num
result['end_line'] = line_num
return dict(urls=results)
SPDX_LICENSE_URL = 'https://spdx.org/licenses/{}'
DEJACODE_LICENSE_URL = 'https://enterprise.dejacode.com/urn/urn:dje:license:{}'
SCANCODE_LICENSEDB_URL = 'https://scancode-licensedb.aboutcode.org/{}'
SCANCODE_DATA_BASE_URL = 'https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data'
SCANCODE_LICENSE_URL = f'{SCANCODE_DATA_BASE_URL}/licenses/{{}}.LICENSE'
SCANCODE_RULE_URL = f'{SCANCODE_DATA_BASE_URL}/rules/{{}}'
def get_licenses(
location,
min_score=0,
include_text=False,
license_text_diagnostics=False,
license_diagnostics=False,
deadline=sys.maxsize,
unknown_licenses=False,
**kwargs,
):
"""
Return a mapping or license_detections for licenses detected in the file at
`location`
This mapping contains two keys:
- 'license_detections' with a value that is list of mappings of license information.
- 'detected_license_expression' with a value that is a license expression string.
`min_score` is a minimum score threshold from 0 to 100. The default is 0,
meaning that all license matches are returned. If specified, matches with a
score lower than `minimum_score` are not returned.
If `include_text` is True, matched text is included in the returned
`licenses` data as well as a file-level `percentage_of_license_text`
as the percentage of file words detected as license text or notice.
This is used to determine if a file contains mostly licensing.
If ``unknown_licenses`` is True, also detect unknown licenses.
"""
from licensedcode.cache import build_spdx_license_expression
from licensedcode.cache import get_cache
from licensedcode.detection import detect_licenses
from packagedcode.utils import combine_expressions
license_clues = []
license_detections = []
detected_expressions = []
detected_license_expression = None
detected_license_expression_spdx = None
detections = detect_licenses(
location=location,
min_score=min_score,
deadline=deadline,
unknown_licenses=unknown_licenses,
**kwargs,
)
all_qspans = []
detection = None
for detection in detections:
all_qspans.extend(detection.qspans)
if detection.license_expression is None:
detection_mapping = detection.to_dict(
include_text=include_text,
license_text_diagnostics=license_text_diagnostics,
license_diagnostics=license_diagnostics,
)
license_clues.extend(detection_mapping["matches"])
else:
detected_expressions.append(detection.license_expression)
license_detections.append(
detection.to_dict(
include_text=include_text,
license_text_diagnostics=license_text_diagnostics,
license_diagnostics=license_diagnostics,
)
)
if TRACE:
logger_debug(f"api: get_licenses: license_detections: {license_detections}")
logger_debug(f"api: get_licenses: license_clues: {license_clues}")
if detected_expressions:
detected_license_expression = combine_expressions(
expressions=detected_expressions,
relation='AND',
unique=True,
)
detected_license_expression_spdx = str(build_spdx_license_expression(
detected_license_expression,
licensing=get_cache().licensing
))
percentage_of_license_text = 0
if detection:
percentage_of_license_text = detection.percentage_license_text_of_file(all_qspans)
return dict([
('detected_license_expression', detected_license_expression),
('detected_license_expression_spdx', detected_license_expression_spdx),
('license_detections', license_detections),
('license_clues', license_clues),
('percentage_of_license_text', percentage_of_license_text),
])
SCANCODE_DEBUG_PACKAGE_API = os.environ.get('SCANCODE_DEBUG_PACKAGE_API', False)
def _get_package_data(location, application=True, system=False, **kwargs):
"""
Return a mapping of package manifest information detected in the file at ``location``.
Include ``application`` packages (such as pypi) and/or ``system`` packages.
Note that all exceptions are caught if there are any errors while parsing a
package manifest.
"""
assert application or system
from packagedcode.recognize import recognize_package_data
try:
return recognize_package_data(
location=location,
application=application,
system=system
) or []
except Exception as e:
if TRACE:
logger.error(f'_get_package_data: location: {location!r}: Exception: {e}')
if SCANCODE_DEBUG_PACKAGE_API:
raise
else:
# attention: we are swallowing ALL exceptions here!
pass
def get_package_info(location, **kwargs):
"""
Return a mapping of package information detected in the file at `location`.
This API function is DEPRECATED, use `get_package_data` instead.
"""
import warnings
warnings.warn(
"`get_package_info` is deprecated. Use `get_package_data` instead.",
DeprecationWarning,
stacklevel=1
)
packages = _get_package_data(location, **kwargs) or []
return dict(packages=[p.to_dict() for p in packages])
def get_package_data(location, application=True, system=False, **kwargs):
"""
Return a mapping of package manifest information detected in the file at
`location`.
Include ``application`` packages (such as pypi) and/or ``system`` packages.
"""
if TRACE:
print(' scancode.api.get_package_data: kwargs', kwargs)
package_datas = _get_package_data(
location=location,
application=application,
system=system,
**kwargs,
) or []
return dict(package_data=[pd.to_dict() for pd in package_datas])
def get_file_info(location, **kwargs):
"""
Return a mapping of file information collected for the file at `location`.
"""
result = {}
# TODO: move date and size these to the inventory collection step???
result['date'] = get_last_modified_date(location) or None
result['size'] = getsize(location) or 0
sha1, md5, sha256 = multi_checksums(location, ('sha1', 'md5', 'sha256')).values()
result['sha1'] = sha1
result['md5'] = md5
result['sha256'] = sha256
collector = get_type(location)
result['mime_type'] = collector.mimetype_file or None
result['file_type'] = collector.filetype_file or None
result['programming_language'] = collector.programming_language or None
result['is_binary'] = bool(collector.is_binary)
result['is_text'] = bool(collector.is_text)
result['is_archive'] = bool(collector.is_archive)
result['is_media'] = bool(collector.is_media)
result['is_source'] = bool(collector.is_source)
result['is_script'] = bool(collector.is_script)
return result
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/scancode/api.py
| 0.54698 | 0.198258 |
api.py
|
pypi
|
# FIXME: the glob story is very weird!!!
examples_text = '''
Scancode command lines examples:
(Note for Windows: use '\\' back slash instead of '/' forward slash for paths.)
Scan a single file for copyrights. Print scan results to stdout as pretty JSON:
scancode --copyright samples/zlib/zlib.h --json-pp -
Scan a single file for licenses, print verbose progress to stderr as each
file is scanned. Save scan to a JSON file:
scancode --license --verbose samples/zlib/zlib.h --json licenses.json
Scan a directory explicitly for licenses and copyrights. Redirect JSON scan
results to a file:
scancode --license --copyright samples/zlib/ --json - > scan.json
Scan a directory while ignoring a single file. Scan for license, copyright and
package manifests. Use four parallel processes.
Print scan results to stdout as pretty formatted JSON.
scancode -lc --package --ignore README --processes 4 --json-pp - samples/
Scan a directory while ignoring all files with .txt extension.
Print scan results to stdout as pretty formatted JSON.
It is recommended to use quotes around glob patterns to prevent pattern
expansion by the shell:
scancode --json-pp - --ignore "*.txt" samples/
Special characters supported in GLOB pattern:
- * matches everything
- ? matches any single character
- [seq] matches any character in seq
- [!seq] matches any character not in seq
For a literal match, wrap the meta-characters in brackets.
For example, '[?]' matches the character '?'.
For details on GLOB patterns see https://en.wikipedia.org/wiki/Glob_(programming).
Note: Glob patterns cannot be applied to path as strings.
For example, this will not ignore "samples/JGroups/licenses".
scancode --json - --ignore "samples*licenses" samples/
Scan a directory while ignoring multiple files (or glob patterns).
Print the scan results to stdout as JSON:
scancode --json - --ignore README --ignore "*.txt" samples/
Scan a directory for licenses and copyrights. Save scan results to an
HTML file:
scancode --license --copyright --html scancode_result.html samples/zlib
'''
epilog_text = '''Examples (use --examples for more):
\b
Scan the 'samples' directory for licenses and copyrights.
Save scan results to the 'scancode_result.json' JSON file:
scancode --license --copyright --json-pp scancode_result.json samples
\b
Scan the 'samples' directory for licenses and package manifests. Print scan
results on screen as pretty-formatted JSON (using the special '-' FILE to print
to on screen/to stdout):
scancode --json-pp - --license --package samples
Note: when you run scancode, a progress bar is displayed with a counter of the
number of files processed. Use --verbose to display file-by-file progress.
'''
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/scancode/help.py
| 0.430866 | 0.350866 |
help.py
|
pypi
|
import string
import re
import ipaddress
import urlpy
from commoncode.text import toascii
from cluecode import finder_data
from textcode import analysis
# Tracing flags
TRACE = False
TRACE_URL = False
TRACE_EMAIL = False
def logger_debug(*args):
pass
if TRACE or TRACE_URL or TRACE_EMAIL:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Find patterns in text lines such as a emails and URLs.
Optionally apply filters to pattern matches.
"""
def find(location, patterns):
"""
Yield match and matched lines for patterns found in file at location as a
tuple of (key, found text, text line). `patterns` is a list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
if TRACE:
from pprint import pformat
loc = pformat(location)
logger_debug('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals())
for line_number, line in analysis.numbered_text_lines(location, demarkup=False):
for key, pattern in patterns:
for match in pattern.findall(line):
if TRACE:
logger_debug('find: yielding match: key=%(key)r, '
'match=%(match)r,\n line=%(line)r' % locals())
yield key, toascii(match), line, line_number
def unique_filter(matches):
"""
Iterate over matches and yield unique matches.
"""
uniques = set()
for key, match, line, line_number in matches:
if (key, match,) in uniques:
continue
uniques.add((key, match,))
yield key, match, line, line_number
def apply_filters(matches, *filters):
"""
Apply a sequence of `filters` to a `matches` iterable. Return a new filtered
matches iterable.
A filter must accept a single arg: an iterable of tuples of (key, match,
line, line_number) and must return an iterable of tuples of (key, match, line,
line_number).
"""
for filt in filters:
matches = filt(matches)
return matches
def build_regex_filter(pattern):
"""
Return a filter function using regex pattern, filtering out matches
matching this regex. The pattern should be text, not a compiled re.
"""
def re_filt(matches):
if TRACE:
logger_debug('re_filt: pattern="{}"'.format(pattern))
for key, match, line, line_number in matches:
if matcher(match):
if TRACE:
logger_debug('re_filt: filtering match: "{}"'.format(match))
continue
yield key, match, line, line_number
matcher = re.compile(pattern, re.UNICODE | re.IGNORECASE).match
return re_filt
# A good reference page of email address regex is:
# http://fightingforalostcause.net/misc/2006/compare-email-regex.php email
# regex from http://www.regular-expressions.info/regexbuddy/email.html
def emails_regex():
return re.compile('\\b[A-Z0-9._%-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}\\b', re.IGNORECASE)
def find_emails(location, unique=True):
"""
Yield an iterable of (email, line_number) found in file at ``location``.
Only return unique items if ``unique`` is True.
"""
patterns = [('emails', emails_regex(),)]
matches = find(location, patterns)
if TRACE_EMAIL:
matches = list(matches)
for r in matches:
logger_debug('find_emails: match:', r)
filters = (junk_email_domains_filter, uninteresting_emails_filter)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, email, _line, line_number in matches:
yield email, line_number
def junk_email_domains_filter(matches):
"""
Given an iterable of email matches, return an iterable where email with
common uninteresting domains have been removed, such as local, non public
or example.com emails.
"""
for key, email, line, line_number in matches:
if is_good_email_domain(email):
yield key, email, line, line_number
else:
if TRACE:
logger_debug(f'junk_email_domains_filter: !is_good_host: {email!r}')
def is_good_email_domain(email):
"""
Return True if the domain of the ``email`` string is valid, False otherwise
such as for local, non public domains.
For example::
>>> is_good_email_domain("[email protected]")
True
>>> is_good_email_domain("[email protected]")
False
>>> is_good_email_domain("[email protected]")
False
"""
if not email:
return False
_dest, _, server = email.partition('@')
if not is_good_host(server):
return False
fake_url = f'http://{server}'
_host, domain = url_host_domain(fake_url)
if not is_good_host(domain):
return False
return True
def uninteresting_emails_filter(matches):
"""
Given an iterable of emails matches, return an iterable where common
uninteresting emails have been removed.
"""
for key, email, line, line_number in matches:
good_email = finder_data.classify_email(email)
if not good_email:
continue
yield key, email, line, line_number
# TODO: consider: http://www.regexguru.com/2008/11/detecting-urls-in-a-block-of-text/
# TODO: consider: http://blog.codinghorror.com/the-problem-with-urls/
schemes = 'https?|ftps?|sftp|rsync|ssh|svn|git|hg|https?\\+git|https?\\+svn|https?\\+hg'
url_body = '[^\\s<>\\[\\]"]'
def urls_regex():
# no space, no < >, no [ ] and no double quote
return re.compile('''
(
# URLs with schemes
(?:%(schemes)s)://%(url_body)s+
|
# common URLs prefix without schemes
(?:www|ftp)\\.%(url_body)s+
|
# git style [email protected]:christophercantu/pipeline.git
git\\@%(url_body)s+:%(url_body)s+\\.git
)''' % globals()
, re.UNICODE | re.VERBOSE | re.IGNORECASE)
INVALID_URLS_PATTERN = '((?:' + schemes + ')://([$%*/_])+)'
def find_urls(location, unique=True):
"""
Yield an iterable of (url, line_number) found in file at ``location``.
Only return unique items if ``unique`` is True.
`location` can be a list of strings for testing.
"""
patterns = [('urls', urls_regex(),)]
matches = find(location, patterns)
if TRACE:
matches = list(matches)
for m in matches:
logger_debug('url match:', m)
# the order of filters IS important
filters = (
verbatim_crlf_url_cleaner,
end_of_url_cleaner,
empty_urls_filter,
scheme_adder,
user_pass_cleaning_filter,
build_regex_filter(INVALID_URLS_PATTERN),
canonical_url_cleaner,
junk_url_hosts_filter,
junk_urls_filter,
)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, url, _line, line_number in matches:
if TRACE_URL:
logger_debug('find_urls: line_number:', line_number, '_line:', repr(_line),
'type(url):', type(url), 'url:', repr(url))
yield str(url), line_number
EMPTY_URLS = set(['https', 'http', 'ftp', 'www', ])
def empty_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable without empty URLs.
"""
for key, match, line, line_number in matches:
junk = match.lower().strip(string.punctuation).strip()
if not junk or junk in EMPTY_URLS:
if TRACE:
logger_debug('empty_urls_filter: filtering match: %(match)r' % locals())
continue
yield key, match, line, line_number
def verbatim_crlf_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where literal end of
lines and carriage return characters that may show up as-is, un-encoded in
a URL have been removed.
"""
# FIXME: when is this possible and could happen?
for key, url, line, line_number in matches:
if not url.endswith('/'):
url = url.replace('\n', '')
url = url.replace('\r', '')
yield key, url, line, line_number
def end_of_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where junk characters
commonly found at the end of a URL are removed.
This is not entirely correct, but works practically.
"""
for key, url, line, line_number in matches:
if not url.endswith('/'):
url = url.replace(u'<', u'<')
url = url.replace(u'>', u'>')
url = url.replace(u'&', u'&')
url = url.rstrip(string.punctuation)
url = url.split(u'\\')[0]
url = url.split(u'<')[0]
url = url.split(u'>')[0]
url = url.split(u'(')[0]
url = url.split(u')')[0]
url = url.split(u'[')[0]
url = url.split(u']')[0]
url = url.split(u'"')[0]
url = url.split(u"'")[0]
yield key, url, line, line_number
non_standard_urls_prefix = ('git@',)
def is_filterable(url):
"""
Return True if a url is eligible for filtering. Certain URLs should not pass
through certain filters (such as a [email protected] style urls)
"""
return not url.startswith(non_standard_urls_prefix)
def scheme_adder(matches):
"""
Add a fake http:// scheme if there was none.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
match = add_fake_scheme(match)
yield key, match, line, line_number
def add_fake_scheme(url):
"""
Add a fake http:// scheme to URL if has none.
"""
if not has_scheme(url):
url = 'http://' + url.lstrip(':/').strip()
return url
def has_scheme(url):
"""
Return True if url has a scheme.
"""
return re.match('^(?:%(schemes)s)://.*' % globals(), url, re.UNICODE)
def user_pass_cleaning_filter(matches):
"""
Given an iterable of URL matches, return an iterable where user and
password are removed from the URLs host.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
host, _domain = url_host_domain(match)
if not host:
if TRACE:
logger_debug('user_pass_cleaning_filter: '
'filtering match(no host): %(match)r' % locals())
continue
if '@' in host:
# strips any user/pass
host = host.split(u'@')[-1]
yield key, match, line, line_number
DEFAULT_PORTS = {
'http': 80,
'https': 443
}
def canonical_url(uri):
"""
Return the canonical representation of a given URI.
This assumes the `uri` has a scheme.
* When a default port corresponding for the scheme is explicitly declared
(such as port 80 for http), the port will be removed from the output.
* Fragments '#' are not removed.
* Params and query string arguments are not reordered.
"""
try:
parsed = urlpy.parse(uri)
if not parsed:
return
if TRACE:
logger_debug('canonical_url: parsed:', parsed)
sanitized = parsed.sanitize()
if TRACE:
logger_debug('canonical_url: sanitized:', sanitized)
punycoded = sanitized.punycode()
if TRACE:
logger_debug('canonical_url: punycoded:', punycoded)
deport = punycoded.remove_default_port()
if TRACE:
logger_debug('canonical_url: deport:', deport)
return str(sanitized)
except Exception as e:
if TRACE:
logger_debug('canonical_url: failed for:', uri, 'with:', repr(e))
# ignore it
pass
def canonical_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where URLs have been
canonicalized.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
canonical = canonical_url(match)
if TRACE:
logger_debug('canonical_url_cleaner: '
'match=%(match)r, canonical=%(canonical)r' % locals())
match = canonical
if match:
yield key, match , line, line_number
IP_V4_RE = '^(\\d{1,3}\\.){0,3}\\d{1,3}$'
def is_ip_v4(s):
return re.compile(IP_V4_RE, re.UNICODE).match(s)
IP_V6_RE = (
'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$'
'|'
'^([0-9a-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}\\d{1,3}$'
)
def is_ip_v6(s):
"""
Return True is string s is an IP V6 address
"""
return re.compile(IP_V6_RE, re.UNICODE).match(s)
def is_ip(s):
"""
Return True is string s is an IP address
"""
return is_ip_v4(s) or is_ip_v6(s)
def get_ip(s):
"""
Return True is string s is an IP address
"""
if not is_ip(s):
return False
try:
ip = ipaddress.ip_address(str(s))
return ip
except ValueError:
return False
def is_private_ip(ip):
"""
Return true if ip object is a private or local IP.
"""
if ip:
if isinstance(ip, ipaddress.IPv4Address):
private = (
ip.is_reserved
or ip.is_private
or ip.is_multicast
or ip.is_unspecified
or ip.is_loopback
or ip.is_link_local
)
else:
private(
ip.is_multicast
or ip.is_reserved
or ip.is_link_local
or ip.is_site_local
or ip.is_private
or ip.is_unspecified
or ip.is_loopback
)
return private
def is_good_host(host):
"""
Return True if the host is not some local or uninteresting host.
"""
if not host:
return False
ip = get_ip(host)
if ip:
if is_private_ip(ip):
return False
return finder_data.classify_ip(host)
# at this stage we have a host name, not an IP
if '.' not in host:
# private hostnames not in a domain, including localhost
return False
good_host = finder_data.classify_host(host)
return good_host
def url_host_domain(url):
"""
Return a tuple of the (host, domain) of a URL or None. Assumes that the
URL has a scheme.
"""
try:
parsed = urlpy.parse(url)
host = parsed.host
if not host:
return None, None
domain = parsed.pld
return host.lower(), domain.lower()
except Exception as e:
if TRACE:
logger_debug('url_host_domain: failed for:', url, 'with:', repr(e))
# ignore it
return None, None
def junk_url_hosts_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting hosts or domains have been removed, such as local,
non public or example.com URLs.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
host, domain = url_host_domain(match)
if not is_good_host(host):
if TRACE:
logger_debug('junk_url_hosts_filter: '
'!is_good_host:%(host)r): %(match)r' % locals())
continue
if not is_good_host(domain) and not is_ip(host):
if TRACE:
logger_debug('junk_url_hosts_filter: ''!is_good_host:%(domain)r '
'and !is_ip:%(host)r: %(match)r' % locals())
continue
yield key, match, line, line_number
def junk_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting URLs, or uninteresting URL hosts or domains have been
removed, such as local, non public or example.com URLs.
"""
for key, match, line, line_number in matches:
good_url = finder_data.classify_url(match)
if not good_url:
if TRACE:
logger_debug('junk_url_filter: %(match)r' % locals())
continue
yield key, match, line, line_number
def find_pattern(location, pattern, unique=False):
"""
Find regex pattern in the text lines of file at location.
Return all match groups joined as one unicode string.
Only return unique items if unique is True.
"""
pattern = re.compile(pattern, re.UNICODE | re.IGNORECASE)
matches = find(location, [(None, pattern,)])
if unique:
matches = unique_filter(matches)
for _key, match , _line, line_number in matches:
yield match, line_number
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/cluecode/finder.py
| 0.543106 | 0.197735 |
finder.py
|
pypi
|
The purpose of `packagedcode` is to:
- detect a package,
- determine its dependencies,
- collect its declared licensing (at the metadata/manifest level)
vs. its actual license (as scanned and normalized).
1. **detect the presence of a package** in a codebase based on its manifest, its file
or archive type. Typically it is a third party package but it may be your own.
Taking Python as a main example a package can exist in multiple forms:
1.1. as a **source checkout** (or some source archive such as a source
distribution or an `sdist`) where the presence of a `setup.py` or some
`requirements.txt` file is the key marker for Python. For Maven it would be a
`pom.xml` or a `build.gradle` file, for Ruby a `Gemfile` or `Gemfile.lock`, the
presence of autotools files, and so on, with the goal to eventually covering all
the packages formats/types that are out there and commonly used.
1.2. as an **installable archive or binary** such as a Pypi wheel `.whl` or
`.egg`, a Maven `.jar`, a Ruby `.gem`, a `.nupkg` for a Nuget, a `.rpm` or `.deb`
Linux package, etc... Here the type, shape and name structure of an archive as
well as some its files content are the key markers for detection. The metadata
may also be included in that archive as a file or as some headers (e.g. RPMs)
1.3. as an **installed packaged** such as when you `pip install` a Python package
or `bundle install` Ruby gems or `npm install` node modules. Here the key markers
may be some combo of a typical or conventional directory layout and presence of
specific files such as the metadata installed with a Python `wheel`, a `vendor`
directory for Ruby, some `node_modules` directory tree for npms, or a certain
file type with metadata such as Windows DLLs. Additional markers may also include
"namespaces" such as Java or Python imports, C/C++ namespace declarations.
2. **parse and collect the package datafile or manifest(s)** metadata. For Python, this means
extracting name, version, authorship, declared licensing and declared dependencies as
found in the any of the package descriptor files (e.g. a `setup.py` file,
`requirements` file(s) or any of the `*-dist-info` or `*-egg-info` dir files such as
a `metadata.json`). Other package datafile formats have their own metatada that may be more or
less comprehensive in the breadth and depth of information they offer (e.g.
`.nuspec`, `package.json`, `bower.json`, Godeps, etc...). These metadata include the
declared dependencies (and in some cases the fully resolved dependencies too such as
with Gemfile.lock). Finally, all the different packages formats and data are
normalized and stored in a common data structure abstracting the small differences of
naming and semantics that may exists between all the different package formats.
Once collected, these data are then injected in the `package_data` section of a file scan
for each recognized package datafile.
3. **assemble multiple package datafile** as top level packages.
What code in `packagedcode` is not meant to do:
A. **download packages** from a thirdparty repository: there is code planned for
another tool that will specifically deal with this and also handle collecting
the metadata as served by a package repository (which are in most cases --but not
always-- the same as what is declared in the manifests).
B. **resolve dependencies**: the focus here is on a purely static analysis that by
design does not rely on any network access at runtime. To scan for actually used
dependencies the process is to scan an as-built or as-installed or as-deployed
odebase where the dependencies have already been provisioned and installed
ScanCode will also detect these.
There is also a planned prototype for a dynamic multi-package dependencies
resolver that actually runs live the proper tool to resolve and collect dependencies
(e.g. effectively running Maven, bundler, pip, npm, gradle, bower, go get/dep, etc).
This will be a tool separate from ScanCode as this requires having several/all
package managers installed (and possibly multiple versions of each) and may run code
from the codebase (e.g. a setup.py) and access the network for fetching or resolving
dependencies. It could be also exposed as a web service that can take in a manifest
and package and run the dep resolution safely in an isolated environment (e.g. a
chroot jail or docker container) and return the collected deps.
C. **match packages** (and files) to actual repositories or registries, e.g. given a
scan detecting packages, matching will look them up in a remote package
repository or a local index and possibly using A. and/or B. additionally if needed.
Here again there is a planned code and a tool that will deal specifically with
this aspect and will handle also building an index of actual registries/repositories
and matching using hashes and fingerprints.
And now some answers to questions originally reported by @sschuberth:
> This does not download the source code of a Python package to run ScanCode over it.
Correct. The assumption with ScanCode proper is that the deps have been fetched in the
code you scan if you want to scan for deps. Packages will be detected with their declared
deps, but the deps will neither be resolved nor fetched. As a second step we could also
verify that all the declared deps are present in the scanned code as detected packages.
> This means that cases where the license from the metadata is wrong compared to the LICENSE
file in the source code will not be detected.
Both the metadata and the file level licenses (such as a header comment or a
`LICENSE` file of sorts) are detected by ScanCode: the license scan detect the
licenses while the package scan collects the declared licensing in the metadata. The
interesting thing thanks to this combo is that conflicts (or incomplete
data) can be analyzed and an automated deduction process is feasible: given a
scan for packages and licenses and copyrights, do the package metadata
asserted/declared licenses match the actual detected licenses? If not this could be
reported as an "error" condition... Furthermore, this could be refined based on
classification of the files: a package may assert a top level `MIT` license and use a
GPL-licensed build script. By knowing that the build script is indeed a build script,
we could report that the GPL detected in such script does not conflict with the
overall declared MIT license of the package. The same could be done with test
scripts/code, or documentation code (such as doxygen-generated docs)
> Licenses from transitive dependencies are not taken into account.
If the transitive dependencies have been resolved and their code is present in the
codebase, then they would be caught by a static ScanCode scan and eventually scanned
both for package metadata and/or license detection. There are some caveats to deal
with because some tools (e.g. Maven) may not store the corresponding artifacts/Jars
locally (e.g. side-by-side with a given checkout) and use a `~/user` "global" dot
directory to store a cache instead.
Beyond this, actual dependency resolution of a single package or a complete manifest
will be the topic of another tool as mentioned above.
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/README.rst
| 0.895731 | 0.754418 |
README.rst
|
pypi
|
import ast
from pathlib import Path
"""
Parse setup.py files.
"""
# https://setuptools.readthedocs.io/en/latest/setuptools.html#metadata
FIELDS = {
'author_email',
'author',
'classifiers',
'dependency_links',
'description',
'download_url',
'extras_require',
'install_requires',
'keywords',
'license_file',
'license',
'long_description_content_type',
'long_description',
'maintainer_email',
'maintainer',
'metadata_version',
'name',
'obsoletes',
'package_dir',
'platforms',
'project_urls',
'provides',
'python_requires',
'requires',
'setup_requires',
'tests_require',
'url',
'version',
}
def is_setup_call(element):
"""
Return if the AST ``element`` is a call to the setup() function.
Note: this is derived from the code in packagedcode.pypi.py
"""
if (
isinstance(element, ast.Call)
and (
hasattr(element, 'func')
and isinstance(element.func, ast.Name)
and getattr(element.func, 'id', None) == 'setup'
) or (
hasattr(element, 'func')
and isinstance(element.func, ast.Attribute)
and getattr(element.func, 'attr', None) == 'setup'
and isinstance(element.func.value, ast.Name)
and getattr(element.func.value, 'id', None) == 'setuptools'
)
):
return True
def parse_setup_py(location):
"""
Return a mapping of setuptools.setup() call argument found in a setup.py
file at ``location`` or an empty mapping.
"""
path = Path(location)
tree = tuple(ast.parse(path.read_text(encoding='utf8')).body)
body = tuple(get_body(tree))
call = get_setup_call(tree)
result = get_call_kwargs(call, body)
return clean_setup(result)
def get_body(elements):
"""
Yield the body from ``elements`` as a single iterable.
"""
for element in elements:
if isinstance(element, ast.FunctionDef):
yield from get_body(element.body)
continue
if isinstance(element, ast.If):
yield from get_body(element.body)
if isinstance(element, ast.Expr):
yield element.value
continue
yield element
def get_setup_call(elements):
"""
Return a setup() method call found in the ``elements`` or None.
"""
for element in get_body(elements):
if is_setup_call(element):
return element
elif isinstance(element, (ast.Assign,)):
if isinstance(element.value, ast.Call):
if is_setup_call(element.value):
return element.value
def node_to_value(node, body):
"""
Return the extracted and converted value of a node or None
"""
if node is None:
return
if hasattr(ast, 'Constant'):
if isinstance(node, ast.Constant):
return node.value
if isinstance(node, ast.Str):
return node.s
if isinstance(node, ast.Num):
return node.n
if isinstance(node, (ast.List, ast.Tuple, ast.Set,)):
return [node_to_value(subnode, body) for subnode in node.elts]
if isinstance(node, ast.Dict):
result = {}
for key, value in zip(node.keys, node.values):
result[node_to_value(key, body)] = node_to_value(value, body)
return result
if isinstance(node, ast.Name):
variable = find_variable_in_body(body, node.id)
if variable is not None:
return node_to_value(variable, body)
if isinstance(node, ast.Call):
if not isinstance(node.func, ast.Name):
return
if node.func.id != 'dict':
return
return get_call_kwargs(node, body)
return
def find_variable_in_body(body, name):
"""
Return the value of the variable ``name`` found in the ``body`` ast tree or None.
"""
for elem in body:
if not isinstance(elem, ast.Assign):
continue
for target in elem.targets:
if not isinstance(target, ast.Name):
continue
if target.id == name:
return elem.value
def get_call_kwargs(node: ast.Call, body):
"""
Return a mapping of setup() method call keyword arguments.
"""
result = {}
keywords = getattr(node, 'keywords', []) or []
for keyword in keywords:
# dict unpacking
if keyword.arg is None:
value = node_to_value(keyword.value, body)
if isinstance(value, dict):
result.update(value)
continue
# keyword argument
value = node_to_value(keyword.value, body)
if value is None:
continue
result[keyword.arg] = value
return result
def clean_setup(data):
"""
Return a cleaned mapping from a setup ``data`` mapping.
"""
result = {k: v
for k, v in data.items()
if k in FIELDS
and (v and v is not False)
and str(v) != 'UNKNOWN'
}
# split keywords in words
keywords = result.get('keywords')
if keywords and isinstance(keywords, str):
# some keywords are separated by coma, some by space or lines
if ',' in keywords:
keywords = [k.strip() for k in keywords.split(',')]
else:
keywords = keywords.split()
result['keywords'] = keywords
return result
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/pypi_setup_py.py
| 0.793826 | 0.294653 |
pypi_setup_py.py
|
pypi
|
from packagedcode import models
"""
Various package data file formats to implment.
"""
# Package types
# NOTE: this is somewhat redundant with extractcode archive handlers
# yet the purpose and semantics are rather different here
# TODO: parse me!!!
# TODO: add missing URLs and descriptions
class JavaJarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_jar'
# NOTE: there are a few rare cases where a .zip can be a JAR.
path_patterns = ('*.jar',)
filetypes = ('zip archive', 'java archive',)
description = 'JAR Java Archive'
documentation_url = 'https://en.wikipedia.org/wiki/JAR_(file_format)'
class IvyXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'ant_ivy_xml'
path_patterns = ('*/ivy.xml',)
default_package_type = 'ivy'
default_primary_language = 'Java'
description = 'Ant IVY dependency file'
documentation_url = 'https://ant.apache.org/ivy/history/latest-milestone/ivyfile.html'
class JavaWarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_war_archive'
path_patterns = ('*.war',)
filetypes = ('zip archive',)
default_package_type = 'war'
default_primary_language = 'Java'
description = 'Java Web Application Archive'
documentation_url = 'https://en.wikipedia.org/wiki/WAR_(file_format)'
class JavaWarWebXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_war_web_xml'
path_patterns = ('*/WEB-INF/web.xml',)
filetypes = ('zip archive',)
default_package_type = 'war'
default_primary_language = 'Java'
description = 'Java WAR web/xml'
documentation_url = 'https://en.wikipedia.org/wiki/WAR_(file_format)'
class JavaEarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_ear_archive'
default_package_type = 'ear'
default_primary_language = 'Java'
path_patterns = ('*.ear',)
filetypes = ('zip archive',)
description = 'Java EAR Enterprise application archive'
documentation_url = 'https://en.wikipedia.org/wiki/EAR_(file_format)'
class JavaEarAppXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_ear_application_xml'
default_package_type = 'ear'
default_primary_language = 'Java'
path_patterns = ('*/META-INF/application.xml',)
description = 'Java EAR application.xml'
documentation_url = 'https://en.wikipedia.org/wiki/EAR_(file_format)'
class Axis2MarModuleXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'axis2_module_xml'
path_patterns = ('*/meta-inf/module.xml',)
default_package_type = 'axis2'
default_primary_language = 'Java'
description = 'Apache Axis2 module.xml'
documentation_url = 'https://axis.apache.org/axis2/java/core/docs/modules.html'
class Axis2MarArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'axis2_mar'
path_patterns = ('*.mar',)
filetypes = ('zip archive',)
default_package_type = 'axis2'
default_primary_language = 'Java'
description = 'Apache Axis2 module archive'
documentation_url = 'https://axis.apache.org/axis2/java/core/docs/modules.html'
class JBossSarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'jboss_sar'
path_patterns = ('*.sar',)
filetypes = ('zip archive',)
default_package_type = 'jboss-service'
default_primary_language = 'Java'
description = 'JBOSS service archive'
documentation_url = 'https://docs.jboss.org/jbossas/docs/Server_Configuration_Guide/4/html/ch02s01.html'
class JBossServiceXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'jboss_service_xml'
path_patterns = ('*/meta-inf/jboss-service.xml',)
default_package_type = 'jboss-service'
default_primary_language = 'Java'
description = 'JBOSS service.xml'
documentation_url = 'https://docs.jboss.org/jbossas/docs/Server_Configuration_Guide/4/html/ch02s01.html'
class MeteorPackageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'meteor_package'
path_patterns = ('*/package.js',)
default_package_type = 'meteor'
default_primary_language = 'JavaScript'
description = 'Meteor package.js'
documentation_url = 'https://docs.meteor.com/api/packagejs.html'
class CpanManifestHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_manifest'
path_patterns = ('*/MANIFEST',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl module MANIFEST'
documentation_url = 'https://metacpan.org/pod/Module::Manifest'
class CpanMakefilePlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_makefile'
path_patterns = ('*/Makefile.PL',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl Makefile.PL'
documentation_url = 'https://www.perlmonks.org/?node_id=128077'
# http://blogs.perl.org/users/neilb/2017/04/an-introduction-to-distribution-metadata.html
# Version 2+ data is what you’ll find in META.json
# Version 1.4 data is what you’ll find in META.yml
class CpanMetaYmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_meta_yml'
path_patterns = ('*/META.yml',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl META.yml'
documentation_url = 'https://metacpan.org/pod/CPAN::Meta::YAML'
class CpanMetaJsonHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_meta_json'
path_patterns = ('*/META.json',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl META.json'
documentation_url = 'https://metacpan.org/pod/Parse::CPAN::Meta'
class CpanDistIniHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_dist_ini'
path_patterns = ('*/dist.ini',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl dist.ini'
documentation_url = 'https://metacpan.org/pod/Dist::Zilla::Tutorial'
class AndroidAppArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'android_apk'
default_package_type = 'android'
default_primary_language = 'Java'
path_patterns = ('*.apk',)
filetypes = ('zip archive',)
description = 'Android application package'
documentation_url = 'https://en.wikipedia.org/wiki/Apk_(file_format)'
# see http://tools.android.com/tech-docs/new-build-system/aar-formats
class AndroidLibraryHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'android_aar_library'
default_package_type = 'android_lib'
default_primary_language = 'Java'
# note: Apache Axis also uses AAR path_patterns for plain Jars.
# this could be decided based on internal structure
path_patterns = ('*.aar',)
filetypes = ('zip archive',)
description = 'Android library archive'
documentation_url = 'https://developer.android.com/studio/projects/android-library'
class MozillaExtensionHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'mozilla_xpi'
path_patterns = ('*.xpi',)
filetypes = ('zip archive',)
default_package_type = 'mozilla'
default_primary_language = 'JavaScript'
description = 'Mozilla XPI extension'
documentation_url = 'https://en.wikipedia.org/wiki/XPInstall'
class ChromeExtensionHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'chrome_crx'
path_patterns = ('*.crx',)
filetypes = ('zip archive',)
default_package_type = 'chrome'
default_primary_language = 'JavaScript'
description = 'Chrome extension'
documentation_url = 'https://chrome.google.com/extensions'
class IosAppIpaHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'ios_ipa'
default_package_type = 'ios'
default_primary_language = 'Objective-C'
path_patterns = ('*.ipa',)
filetypes = ('microsoft cabinet',)
description = 'iOS package archive'
documentation_url = 'https://en.wikipedia.org/wiki/.ipa'
class CabArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'microsoft_cabinet'
default_package_type = 'cab'
default_primary_language = 'C'
path_patterns = ('*.cab',)
filetypes = ('microsoft cabinet',)
description = 'Microsoft cabinet archive'
documentation_url = 'https://docs.microsoft.com/en-us/windows/win32/msi/cabinet-files'
class InstallShieldPackageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'installshield_installer'
default_package_type = 'installshield'
path_patterns = ('*.exe',)
filetypes = ('zip installshield',)
description = 'InstallShield installer'
documentation_url = 'https://www.revenera.com/install/products/installshield'
class NsisInstallerHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'nsis_installer'
default_package_type = 'nsis'
path_patterns = ('*.exe',)
filetypes = ('nullsoft installer',)
description = 'NSIS installer'
documentation_url = 'https://nsis.sourceforge.io/Main_Page'
class SharArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'shar_shell_archive'
default_package_type = 'shar'
path_patterns = ('*.shar',)
filetypes = ('posix shell script',)
description = 'shell archive'
documentation_url = 'https://en.wikipedia.org/wiki/Shar'
class AppleDmgHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'apple_dmg'
default_package_type = 'dmg'
path_patterns = ('*.dmg', '*.sparseimage',)
description = ''
documentation_url = ''
class IsoImageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'iso_disk_image'
default_package_type = 'iso'
path_patterns = ('*.iso', '*.udf', '*.img',)
filetypes = ('iso 9660 cd-rom', 'high sierra cd-rom',)
description = 'ISO disk image'
documentation_url = 'https://en.wikipedia.org/wiki/ISO_9660'
class SquashfsImageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'squashfs_disk_image'
default_package_type = 'squashfs'
filetypes = ('squashfs',)
description = 'Squashfs disk image'
documentation_url = 'https://en.wikipedia.org/wiki/SquashFS'
# TODO: Add VM images formats(VMDK, OVA, OVF, VDI, etc) and Docker/other containers
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/misc.py
| 0.428951 | 0.199191 |
misc.py
|
pypi
|
import io
import re
import attr
from packageurl import PackageURL
@attr.s()
class GoModule(object):
namespace = attr.ib(default=None)
name = attr.ib(default=None)
version = attr.ib(default=None)
module = attr.ib(default=None)
require = attr.ib(default=None)
exclude = attr.ib(default=None)
def purl(self, include_version=True):
version = None
if include_version:
version = self.version
return PackageURL(
type='golang',
namespace=self.namespace,
name=self.name,
version=version
).to_string()
# Regex expressions to parse different types of go.mod file dependency
parse_module = re.compile(
r'(?P<type>[^\s]+)'
r'(\s)+'
r'(?P<ns_name>[^\s]+)'
r'\s?'
r'(?P<version>(.*))'
).match
parse_dep_link = re.compile(
r'.*?'
r'(?P<ns_name>[^\s]+)'
r'\s+'
r'(?P<version>(.*))'
).match
def preprocess(line):
"""
Return line string after removing commented portion and excess spaces.
"""
if "//" in line:
line = line[:line.index('//')]
line = line.strip()
return line
def parse_gomod(location):
"""
Return a dictionary containing all the important go.mod file data.
Handle go.mod files from Go.
See https://golang.org/ref/mod#go.mod-files for details
For example:
module example.com/my/thing
go 1.12
require example.com/other/thing v1.0.2
require example.com/new/thing v2.3.4
exclude example.com/old/thing v1.2.3
require (
example.com/new/thing v2.3.4
example.com/old/thing v1.2.3
)
require (
example.com/new/thing v2.3.4
example.com/old/thing v1.2.3
)
Each module line is in the form
require github.com/davecgh/go-spew v1.1.1
or
exclude github.com/davecgh/go-spew v1.1.1
or
module github.com/alecthomas/participle
For example::
>>> p = parse_module('module github.com/alecthomas/participle')
>>> assert p.group('type') == ('module')
>>> assert p.group('ns_name') == ('github.com/alecthomas/participle')
>>> p = parse_module('require github.com/davecgh/go-spew v1.1.1')
>>> assert p.group('type') == ('require')
>>> assert p.group('ns_name') == ('github.com/davecgh/go-spew')
>>> assert p.group('version') == ('v1.1.1')
A line for require or exclude can be in the form:
github.com/davecgh/go-spew v1.1.1
For example::
>>> p = parse_dep_link('github.com/davecgh/go-spew v1.1.1')
>>> assert p.group('ns_name') == ('github.com/davecgh/go-spew')
>>> assert p.group('version') == ('v1.1.1')
"""
with io.open(location, encoding='utf-8', closefd=True) as data:
lines = data.readlines()
gomods = GoModule()
require = []
exclude = []
for i, line in enumerate(lines):
line = preprocess(line)
if 'require' in line and '(' in line:
for req in lines[i + 1:]:
req = preprocess(req)
if ')' in req:
break
parsed_dep_link = parse_dep_link(req)
ns_name = parsed_dep_link.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
if parsed_dep_link:
require.append(GoModule(
namespace=namespace,
name=name,
version=parsed_dep_link.group('version')
)
)
continue
if 'exclude' in line and '(' in line:
for exc in lines[i + 1:]:
exc = preprocess(exc)
if ')' in exc:
break
parsed_dep_link = parse_dep_link(exc)
ns_name = parsed_dep_link.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
if parsed_dep_link:
exclude.append(GoModule(
namespace=namespace,
name=name,
version=parsed_dep_link.group('version')
)
)
continue
parsed_module_name = parse_module(line)
if parsed_module_name:
ns_name = parsed_module_name.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
if 'module' in line:
gomods.namespace = namespace
gomods.name = name
continue
if 'require' in line:
require.append(GoModule(
namespace=namespace,
name=name,
version=parsed_module_name.group('version')
)
)
continue
if 'exclude' in line:
exclude.append(GoModule(
namespace=namespace,
name=name,
version=parsed_module_name.group('version')
)
)
continue
gomods.require = require
gomods.exclude = exclude
return gomods
# Regex expressions to parse go.sum file dependency
# dep example: github.com/BurntSushi/toml v0.3.1 h1:WXkYY....
get_dependency = re.compile(
r'(?P<ns_name>[^\s]+)'
r'\s+'
r'(?P<version>[^\s]+)'
r'\s+'
r'h1:(?P<checksum>[^\s]*)'
).match
def parse_gosum(location):
"""
Return a list of GoSum from parsing the go.sum file at `location`.
Handles go.sum file from Go.
See https://blog.golang.org/using-go-modules for details
A go.sum file contains pinned Go modules checksums of two styles:
For example::
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
... where the line with /go.mod is for a check of that go.mod file
and the other line contains a dirhash for that path as documented as
https://pkg.go.dev/golang.org/x/mod/sumdb/dirhash
For example::
>>> p = get_dependency('github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=')
>>> assert p.group('ns_name') == ('github.com/BurntSushi/toml')
>>> assert p.group('version') == ('v0.3.1')
>>> assert p.group('checksum') == ('WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=')
"""
with io.open(location, encoding='utf-8', closefd=True) as data:
lines = data.readlines()
gosums = []
for line in lines:
line = line.replace('/go.mod', '')
parsed_dep = get_dependency(line)
ns_name = parsed_dep.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
dep = GoModule(
namespace=namespace,
name=name,
version=parsed_dep.group('version')
)
if dep in gosums:
continue
gosums.append(dep)
return gosums
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/go_mod.py
| 0.532911 | 0.210259 |
go_mod.py
|
pypi
|
from commoncode import fileutils
from packagedcode import models
"""
Handle README.*-style semi-structured package metadata.
These are seen in Android, Chromium and a few more places.
"""
# Common README field name mapped to known PackageData field name
PACKAGE_FIELD_BY_README_FIELD = {
'name': 'name',
'project': 'name',
'version': 'version',
'copyright': 'copyright',
'download link': 'download_url',
'downloaded from': 'download_url',
'homepage': 'homepage_url',
'website': 'homepage_url',
'repo': 'homepage_url',
'source': 'homepage_url',
'upstream': 'homepage_url',
'url': 'homepage_url',
'project url': 'homepage_url',
'licence': 'extracted_license_statement',
'license': 'extracted_license_statement',
}
class ReadmeHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'readme'
default_package_type = 'readme'
path_patterns = (
'*/README.android',
'*/README.chromium',
'*/README.facebook',
'*/README.google',
'*/README.thirdparty',
)
@classmethod
def parse(cls, location):
with open(location, encoding='utf-8') as loc:
readme_manifest = loc.read()
package_data = build_package(readme_manifest)
if not package_data.name:
# If no name was detected for the Package, then we use the basename
# of the parent directory as the Package name
parent_dir = fileutils.parent_directory(location)
parent_dir_basename = fileutils.file_base_name(parent_dir)
package_data.name = parent_dir_basename
yield package_data
def build_package(readme_manifest):
"""
Return a Package object from a readme_manifest mapping (from a
README.chromium file or similar) or None.
"""
package = models.PackageData(
datasource_id=ReadmeHandler.datasource_id,
type=ReadmeHandler.default_package_type,
)
for line in readme_manifest.splitlines():
line = line.strip()
if ':' in line:
key, _sep, value = line.partition(':')
elif '=' in line:
key, _sep, value = line.partition('=')
else:
key = None
value = None
if key:
key = key.lower().strip()
if value:
value = value.strip()
if not key or not value:
continue
package_key = PACKAGE_FIELD_BY_README_FIELD.get(key)
if not package_key:
continue
setattr(package, package_key, value)
package.populate_license_fields()
return package
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/readme.py
| 0.638835 | 0.247692 |
readme.py
|
pypi
|
import io
import json
from functools import partial
from packagedcode import models
"""
Parse PHP composer package manifests, see https://getcomposer.org/ and
https://packagist.org/
TODO: add support for composer.lock and packagist formats: both are fairly
similar.
"""
class BasePhpComposerHandler(models.DatafileHandler):
@classmethod
def assemble(cls, package_data, resource, codebase, package_adder):
datafile_name_patterns = (
'composer.json',
'composer.lock',
)
if resource.has_parent():
dir_resource = resource.parent(codebase)
else:
dir_resource = resource
yield from cls.assemble_from_many_datafiles(
datafile_name_patterns=datafile_name_patterns,
directory=dir_resource,
codebase=codebase,
package_adder=package_adder,
)
@classmethod
def assign_package_to_resources(cls, package, resource, codebase, package_adder):
return models.DatafileHandler.assign_package_to_parent_tree(package, resource, codebase, package_adder)
class PhpComposerJsonHandler(BasePhpComposerHandler):
datasource_id = 'php_composer_json'
path_patterns = ('*composer.json',)
default_package_type = 'composer'
default_primary_language = 'PHP'
default_relation_license = 'OR'
description = 'PHP composer manifest'
documentation_url = 'https://getcomposer.org/doc/04-schema.md'
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location``
pointing to a package archive, manifest or similar.
Note that this is NOT exactly the packagist.json format (all are closely
related of course but have important (even if minor) differences.
"""
with io.open(location, encoding='utf-8') as loc:
package_json = json.load(loc)
yield build_package_data(package_json)
def get_repository_homepage_url(namespace, name):
if namespace and name:
return f'https://packagist.org/packages/{namespace}/{name}'
elif name:
return f'https://packagist.org/packages/{name}'
def get_api_data_url(namespace, name):
if namespace and name:
return f'https://packagist.org/p/packages/{namespace}/{name}.json'
elif name:
return f'https://packagist.org/p/packages/{name}.json'
def build_package_data(package_data):
# Note: A composer.json without name and description is not a usable PHP
# composer package. Name and description fields are required but only for
# published packages: https://getcomposer.org/doc/04-schema.md#name We want
# to catch both published and non-published packages here. Therefore, we use
# None as a package name if there is no name.
ns_name = package_data.get('name')
is_private = False
if not ns_name:
ns = None
name = None
is_private = True
else:
ns, _, name = ns_name.rpartition('/')
package = models.PackageData(
datasource_id=PhpComposerJsonHandler.datasource_id,
type=PhpComposerJsonHandler.default_package_type,
namespace=ns,
name=name,
repository_homepage_url=get_repository_homepage_url(ns, name),
api_data_url=get_api_data_url(ns, name),
primary_language=PhpComposerJsonHandler.default_primary_language,
)
# mapping of top level composer.json items to the Package object field name
plain_fields = [
('version', 'version'),
('description', 'summary'),
('keywords', 'keywords'),
('homepage', 'homepage_url'),
]
for source, target in plain_fields:
value = package_data.get(source)
if isinstance(value, str):
value = value.strip()
if value:
setattr(package, target, value)
# mapping of top level composer.json items to a function accepting as
# arguments the composer.json element value and returning an iterable of
# key, values Package Object to update
field_mappers = [
('authors', author_mapper),
('license', partial(licensing_mapper, is_private=is_private)),
('support', support_mapper),
('require', partial(_deps_mapper, scope='require', is_runtime=True)),
('require-dev', partial(_deps_mapper, scope='require-dev', is_optional=True)),
('provide', partial(_deps_mapper, scope='provide', is_runtime=True)),
('conflict', partial(_deps_mapper, scope='conflict', is_runtime=True, is_optional=True)),
('replace', partial(_deps_mapper, scope='replace', is_runtime=True, is_optional=True)),
('suggest', partial(_deps_mapper, scope='suggest', is_runtime=True, is_optional=True)),
('source', source_mapper),
('dist', dist_mapper)
]
for source, func in field_mappers:
value = package_data.get(source)
if value:
if isinstance(value, str):
value = value.strip()
if value:
func(value, package)
# Parse vendor from name value
vendor_mapper(package)
# Per https://getcomposer.org/doc/04-schema.md#license this is an expression
package.populate_license_fields()
return package
class PhpComposerLockHandler(BasePhpComposerHandler):
datasource_id = 'php_composer_lock'
path_patterns = ('*composer.lock',)
default_package_type = 'composer'
default_primary_language = 'PHP'
description = 'PHP composer lockfile'
documentation_url = 'https://getcomposer.org/doc/01-basic-usage.md#commit-your-composer-lock-file-to-version-control'
@classmethod
def parse(cls, location):
with io.open(location, encoding='utf-8') as loc:
package_data = json.load(loc)
packages = [
build_package_data(p)
for p in package_data.get('packages', [])
]
packages_dev = [
build_package_data(p)
for p in package_data.get('packages-dev', [])
]
required_deps = [
build_dep_package(p, scope='require', is_runtime=True, is_optional=False)
for p in packages
]
required_dev_deps = [
build_dep_package(p, scope='require-dev', is_runtime=False, is_optional=True)
for p in packages_dev
]
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
primary_language=cls.default_primary_language,
dependencies=required_deps + required_dev_deps
)
for package in packages + packages_dev:
yield package
def licensing_mapper(licenses, package, is_private=False):
"""
Update package licensing and return package.
Licensing data structure has evolved over time and is a tad messy.
https://getcomposer.org/doc/04-schema.md#license
The value of license is either:
- an SPDX expression string: { "license": "(LGPL-2.1 or GPL-3.0+)" }
- a list of SPDX license ids choices: "license": ["LGPL-2.1","GPL-3.0+"]
Some older licenses are plain strings and not SPDX ids. Also if there is no
license and the `is_private` Fkag is True, we return a "proprietary-license"
license.
"""
if not licenses and is_private:
package.extracted_license_statement = 'proprietary-license'
return package
package.extracted_license_statement = licenses
return package
def author_mapper(authors_content, package):
"""
Update package parties with authors and return package.
https://getcomposer.org/doc/04-schema.md#authors
"""
for name, role, email, url in parse_person(authors_content):
role = role or 'author'
package.parties.append(
models.Party(type=models.party_person, name=name,
role=role, email=email, url=url))
return package
def support_mapper(support, package):
"""
Update support and bug tracking url.
https://getcomposer.org/doc/04-schema.md#support
"""
# TODO: there are many other information we ignore for now
package.bug_tracking_url = support.get('issues') or None
package.code_view_url = support.get('source') or None
return package
def source_mapper(source, package):
"""
Add vcs_url from source tag, if present. Typically only present in
composer.lock
"""
tool = source.get('type')
if not tool:
return package
url = source.get('url')
if not url:
return package
version = source.get('reference')
package.vcs_url = '{tool}+{url}@{version}'.format(**locals())
return package
def dist_mapper(dist, package):
"""
Add download_url from source tag, if present. Typically only present in
composer.lock
"""
url = dist.get('url')
if not url:
return package
package.download_url = url
return package
def vendor_mapper(package):
"""
Vendor is the first part of the name element.
https://getcomposer.org/doc/04-schema.md#name
"""
if package.namespace:
package.parties.append(
models.Party(type=models.party_person,
name=package.namespace, role='vendor'))
return package
def _deps_mapper(deps, package, scope, is_runtime=False, is_optional=False):
"""
Handle deps such as dependencies, devDependencies
return a tuple of (dep type, list of deps)
https://getcomposer.org/doc/04-schema.md#package-links
"""
for ns_name, requirement in deps.items():
ns, _, name = ns_name.rpartition('/')
purl = models.PackageURL(type='composer', namespace=ns, name=name).to_string()
dep = models.DependentPackage(
purl=purl,
extracted_requirement=requirement,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional)
package.dependencies.append(dep)
return package
def parse_person(persons):
"""
https://getcomposer.org/doc/04-schema.md#authors
A "person" is an object with a "name" field and optionally "url" and "email".
Yield a name, email, url tuple for a person object
A person can be in the form:
"authors": [
{
"name": "Nils Adermann",
"email": "[email protected]",
"homepage": "http://www.naderman.de",
"role": "Developer"
},
{
"name": "Jordi Boggiano",
"email": "[email protected]",
"homepage": "http://seld.be",
"role": "Developer"
}
]
Both forms are equivalent.
"""
if isinstance(persons, list):
for person in persons:
# ensure we have our three values
name = person.get('name')
role = person.get('role')
email = person.get('email')
url = person.get('homepage')
# FIXME: this got cargoculted from npm package.json parsing
yield (
name and name.strip(),
role and role.strip(),
email and email.strip('<> '),
url and url.strip('() '))
else:
raise ValueError('Incorrect PHP composer persons: %(persons)r' % locals())
def build_dep_package(package, scope, is_runtime, is_optional):
return models.DependentPackage(
purl=package.purl,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional,
is_resolved=True,
)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/phpcomposer.py
| 0.430028 | 0.158207 |
phpcomposer.py
|
pypi
|
import io
import json
from packageurl import PackageURL
from packagedcode import models
"""
Handle haxelib Haxe packages
See
- https://lib.haxe.org/all/ this lists all the packages.
- https://lib.haxe.org/documentation/creating-a-haxelib-package/
- https://github.com/HaxeFoundation/haxelib
- https://github.com/gogoprog/hxsocketio/blob/master/haxelib.json
- https://github.com/HaxeFoundation/haxelib/blob/development/haxelib.json
Download and homepage are using these conventions:
- https://lib.haxe.org/p/format/
- https://lib.haxe.org/files/3.0/tweenx-1,0,4.zip
- https://lib.haxe.org/p/format/3.4.1/download/
- https://lib.haxe.org/files/3.0/format-3,4,1.zip
"""
# TODO: Update the license based on a mapping:
# Per the doc:
# Can be GPL, LGPL, BSD, Public (for Public Domain), MIT, or Apache.
class HaxelibJsonHandler(models.DatafileHandler):
datasource_id = 'haxelib_json'
path_patterns = ('*/haxelib.json',)
default_package_type = 'haxe'
default_primary_language = 'Haxe'
description = 'Haxe haxelib.json metadata file'
documentation_url = 'https://lib.haxe.org/documentation/creating-a-haxelib-package/'
@classmethod
def _parse(cls, json_data):
name = json_data.get('name')
version = json_data.get('version')
package_data = models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
name=name,
version=version,
homepage_url=json_data.get('url'),
extracted_license_statement=json_data.get('license'),
keywords=json_data.get('tags'),
description=json_data.get('description'),
primary_language=cls.default_primary_language,
)
if name and version:
download_url = f'https://lib.haxe.org/p/{name}/{version}/download/'
package_data.repository_download_url = download_url
package_data.download_url = download_url
if name:
package_data.repository_homepage_url = f'https://lib.haxe.org/p/{name}'
for contrib in json_data.get('contributors', []):
party = models.Party(
type=models.party_person,
name=contrib,
role='contributor',
url='https://lib.haxe.org/u/{}'.format(contrib))
package_data.parties.append(party)
for dep_name, dep_version in json_data.get('dependencies', {}).items():
dep_version = dep_version and dep_version.strip()
is_resolved = bool(dep_version)
dep_purl = PackageURL(
type=cls.default_package_type,
name=dep_name,
version=dep_version
).to_string()
dep = models.DependentPackage(purl=dep_purl, is_resolved=is_resolved,)
package_data.dependencies.append(dep)
return package_data
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package_data archive, manifest or similar.
{
"name": "haxelib",
"url" : "https://lib.haxe.org/documentation/",
"license": "GPL",
"tags": ["haxelib", "core"],
"description": "The haxelib client",
"classPath": "src",
"version": "3.4.0",
"releasenote": " * Fix password input issue in Windows (#421).\n * ....",
"contributors": ["back2dos", "ncannasse", "jason", "Simn", "nadako", "andyli"]
}
"""
with io.open(location, encoding='utf-8') as loc:
json_data = json.load(loc)
yield cls._parse(json_data)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/haxe.py
| 0.477067 | 0.245854 |
haxe.py
|
pypi
|
from contextlib import closing
import pefile
from ftfy import fix_text
from commoncode import text
from packagedcode import models
from packagedcode.models import Party
from packagedcode.models import party_org
from typecode import contenttype
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(
isinstance(a, str) and a or repr(a) for a in args))
"""
Extract data from windows PE DLLs and executable.
Note that the extraction may not be correct for all PE in particular
older legacy PEs. See tests and:
http://msdn.microsoft.com/en-us/library/aa381058%28v=VS.85%29.aspx
PE stores data in a "VarInfo" structure for "variable information".
VarInfo are by definition variable key/value pairs:
http://msdn.microsoft.com/en-us/library/ms646995%28v=vs.85%29.aspx
Therefore we use a list of the most common and useful key names with
an eye on origin and license related information and return a value
when there is one present.
"""
"""
https://docs.microsoft.com/en-us/windows/win32/menurc/versioninfo-resource
Name Description
Comments Additional information that should be displayed for
diagnostic purposes.
CompanyName Company that produced the file?for example, "Microsoft
Corporation" or "Standard Microsystems Corporation, Inc."
This string is required.
FileDescription File description to be presented to users. This string may
be displayed in a list box when the user is choosing files
to install?for example, "Keyboard Driver for AT-Style
Keyboards". This string is required.
FileVersion Version number of the file?for example, "3.10" or
"5.00.RC2". This string is required.
InternalName Internal name of the file, if one exists?for example, a
module name if the file is a dynamic-link library. If the
file has no internal name, this string should be the
original filename, without extension. This string is
required.
LegalCopyright Copyright notices that apply to the file. This should
include the full text of all notices, legal symbols,
copyright dates, and so on. This string is optional.
LegalTrademarks Trademarks and registered trademarks that apply to the file.
This should include the full text of all notices, legal
symbols, trademark numbers, and so on. This string is
optional.
OriginalFilename Original name of the file, not including a path. This
information enables an application to determine whether a
file has been renamed by a user. The format of the name
depends on the file system for which the file was created.
This string is required.
ProductName Name of the product with which the file is distributed. This
string is required.
ProductVersion Version of the product with which the file is
distributed?for example, "3.10" or "5.00.RC2". This string
is required.
"""
# List of common info keys found in PE.
PE_INFO_KEYS = (
'Full Version', # rare and used only by Java exe
'ProductVersion', # the actual version
'FileVersion', # another common version
'Assembly Version', # a version common in MSFT, redundant when present with ProductVersion
'BuildDate', # rare but useful when there 2013/02/04-18:07:46 2018-11-10 14:38
'ProductName', # often present often localized, that's a component name
'OriginalFilename', # name or the original DLL
'InternalName', # often present: sometimes a package name or a .dll or .exe
'License', # rare, seen only in CURL
'LegalCopyright', # copyright notice, sometimes a license tag or URL. Use it for license detection
'LegalTrademarks', # may sometimes contains license or copyright. Ignore a single ".". Treat as part of the declared license
'LegalTrademarks1', # mostly MSFT
'LegalTrademarks2', # mostly MSFT
'LegalTrademarks3', # mostly MSFT
'FileDescription', # description, often localized
'Comments', # random data. Append to a description
'CompanyName', # the company e.g a party, sometimes localized
'Company', # rare, use a fallback if present and CCompanyName missing
'URL', # rarely there but good if there
'WWW', # rarely there but good if there
)
PE_INFO_KEYSET = set(PE_INFO_KEYS)
def pe_info(location):
"""
Return a mapping of common data available for a Windows dll or exe PE
(portable executable).
Return None for non-Windows PE files.
Return an empty mapping for PE from which we could not collect data.
Also collect extra data found if any, returned as a dictionary under the
'extra_data' key in the returned mapping.
"""
if not location:
return {}
result = dict([(k, None,) for k in PE_INFO_KEYS])
extra_data = result['extra_data'] = {}
with closing(pefile.PE(location)) as pe:
if not hasattr(pe, 'FileInfo'):
# No fileinfo section: we return just empties
return result
# >>> pe.FileInfo: this is a list of list of Structure objects:
# [[<Structure: [VarFileInfo] >, <Structure: [StringFileInfo]>]]
file_info = pe.FileInfo
if not file_info or not isinstance(file_info, list):
if TRACE:
logger.debug('pe_info: not file_info')
return result
# here we have a non-empty list
file_info = file_info[0]
if TRACE:
logger.debug('pe_info: file_info:', file_info)
string_file_info = [x for x in file_info
if type(x) == pefile.Structure
and hasattr(x, 'name')
and x.name == 'StringFileInfo']
if not string_file_info:
# No stringfileinfo section: we return just empties
if TRACE:
logger.debug('pe_info: not string_file_info')
return result
string_file_info = string_file_info[0]
if not hasattr(string_file_info, 'StringTable'):
# No fileinfo.StringTable section: we return just empties
if TRACE:
logger.debug('pe_info: not StringTable')
return result
string_table = string_file_info.StringTable
if not string_table or not isinstance(string_table, list):
return result
string_table = string_table[0]
if TRACE:
logger.debug(
'pe_info: Entries keys: ' + str(set(k for k in string_table.entries)))
logger.debug('pe_info: Entry values:')
for k, v in string_table.entries.items():
logger.debug(' ' + str(k) + ': ' + repr(type(v)) + repr(v))
for k, v in string_table.entries.items():
# convert unicode to a safe ASCII representation
key = text.as_unicode(k).strip()
value = text.as_unicode(v).strip()
value = fix_text(value)
if key in PE_INFO_KEYSET:
result[key] = value
else:
extra_data[key] = value
return result
def get_first(mapping, *keys):
"""
Return the first value of the `keys` that is found in the `mapping`.
"""
for key in keys:
value = mapping.get(key)
if value:
return value
def concat(mapping, *keys):
"""
Return a concatenated string of all unique values of the `keys found in the
`mapping`.
"""
values = []
for key in keys:
val = mapping.get(key)
if val and val not in values:
values.append(val)
return '\n'.join(values)
class WindowsExecutableHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'windows_executable'
default_package_type = 'winexe'
filetypes = ('pe32', 'for ms windows',)
path_patterns = (
'*.exe',
'*.dll',
'*.mui',
'*.mun',
'*.com',
'*.winmd',
'*.sys',
'*.tlb',
'*.exe_*',
'*.dll_*',
'*.mui_*',
'*.mun_*',
'*.com_*',
'*.winmd_*',
'*.sys_*',
'*.tlb_*',
'*.ocx',
)
description = 'Windows Portable Executable metadata'
documentation_url = 'https://en.wikipedia.org/wiki/Portable_Executable'
@classmethod
def is_datafile(cls, location, filetypes=tuple()):
"""
Return True if the file at location is highly likely to be a POM.
"""
if super().is_datafile(location, filetypes=filetypes):
return True
T = contenttype.get_type(location)
if T.is_winexe:
return True
@classmethod
def parse(cls, location):
infos = pe_info(location)
version = get_first(
infos,
'Full Version',
'ProductVersion',
'FileVersion',
'Assembly Version',
)
release_date = get_first(infos, 'BuildDate')
if release_date:
if len(release_date) >= 10:
release_date = release_date[:10]
release_date = release_date.replace('/', '-')
name = get_first(
infos,
'ProductName',
'OriginalFilename',
'InternalName',
)
copyr = get_first(infos, 'LegalCopyright')
LegalCopyright = copyr,
LegalTrademarks = concat(
infos,
'LegalTrademarks',
'LegalTrademarks1',
'LegalTrademarks2',
'LegalTrademarks3')
License = get_first(infos, 'License')
extracted_license_statement = {}
if LegalCopyright or LegalTrademarks or License:
extracted_license_statement = dict(
LegalCopyright=copyr,
LegalTrademarks=LegalTrademarks,
License=License
)
description = concat(infos, 'FileDescription', 'Comments')
parties = []
cname = get_first(infos, 'CompanyName', 'Company')
if cname:
parties = [Party(type=party_org, role='author', name=cname)]
homepage_url = get_first(infos, 'URL', 'WWW')
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
name=name,
version=version,
release_date=release_date,
copyright=copyr,
extracted_license_statement=extracted_license_statement,
description=description,
parties=parties,
homepage_url=homepage_url,
)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/win_pe.py
| 0.564339 | 0.180504 |
win_pe.py
|
pypi
|
try:
from license_expression import Licensing
from license_expression import combine_expressions as le_combine_expressions
except:
Licensing = None
le_combine_expressions = None
PLAIN_URLS = (
'https://',
'http://',
)
VCS_URLS = (
'git://',
'git+git://',
'git+https://',
'git+http://',
'hg://',
'hg+http://',
'hg+https://',
'svn://',
'svn+https://',
'svn+http://',
)
# TODO this does not really normalize the URL
# TODO handle vcs_tool
def normalize_vcs_url(repo_url, vcs_tool=None):
"""
Return a normalized vcs_url version control URL given some `repo_url` and an
optional `vcs_tool` hint (such as 'git', 'hg', etc.
Handles shortcuts for GitHub, GitHub gist, Bitbucket, or GitLab repositories
and more using the same approach as npm install:
See https://docs.npmjs.com/files/package.json#repository
or https://getcomposer.org/doc/05-repositories.md
This is done here in npm:
https://github.com/npm/npm/blob/d3c858ce4cfb3aee515bb299eb034fe1b5e44344/node_modules/hosted-git-info/git-host-info.js
These should be resolved:
npm/npm
gist:11081aaa281
bitbucket:example/repo
gitlab:another/repo
expressjs/serve-static
git://github.com/angular/di.js.git
git://github.com/hapijs/boom
[email protected]:balderdashy/waterline-criteria.git
http://github.com/ariya/esprima.git
http://github.com/isaacs/nopt
https://github.com/chaijs/chai
https://github.com/christkv/kerberos.git
https://gitlab.com/foo/private.git
[email protected]:foo/private.git
"""
if not repo_url or not isinstance(repo_url, str):
return
repo_url = repo_url.strip()
if not repo_url:
return
# TODO: If we match http and https, we may should add more check in
# case if the url is not a repo one. For example, check the domain
# name in the url...
if repo_url.startswith(VCS_URLS + PLAIN_URLS):
return repo_url
if repo_url.startswith('git@'):
tool, _, right = repo_url.partition('@')
if ':' in repo_url:
host, _, repo = right.partition(':')
else:
# [email protected]/Filirom1/npm2aur.git
host, _, repo = right.partition('/')
if any(r in host for r in ('bitbucket', 'gitlab', 'github')):
scheme = 'https'
else:
scheme = 'git'
return '%(scheme)s://%(host)s/%(repo)s' % locals()
# FIXME: where these URL schemes come from??
if repo_url.startswith(('bitbucket:', 'gitlab:', 'github:', 'gist:')):
hoster_urls = {
'bitbucket': 'https://bitbucket.org/%(repo)s',
'github': 'https://github.com/%(repo)s',
'gitlab': 'https://gitlab.com/%(repo)s',
'gist': 'https://gist.github.com/%(repo)s', }
hoster, _, repo = repo_url.partition(':')
return hoster_urls[hoster] % locals()
if len(repo_url.split('/')) == 2:
# implicit github, but that's only on NPM?
return f'https://github.com/{repo_url}'
return repo_url
def build_description(summary, description):
"""
Return a description string from a summary and description
"""
summary = (summary or '').strip()
description = (description or '').strip()
if not description:
description = summary
else:
if summary and summary not in description:
description = '\n'.join([summary , description])
return description
_LICENSING = Licensing and Licensing() or None
def combine_expressions(
expressions,
relation='AND',
unique=True,
licensing=_LICENSING,
):
"""
Return a combined license expression string with relation, given a sequence of
license ``expressions`` strings or LicenseExpression objects.
"""
if not licensing:
raise Exception('combine_expressions: cannot combine combine_expressions without license_expression package.')
return expressions and str(le_combine_expressions(expressions, relation, unique, licensing)) or None
def get_ancestor(levels_up, resource, codebase):
"""
Return the nth-``levels_up`` ancestor Resource of ``resource`` in
``codebase`` or None.
For example, with levels_up=2 and starting with a resource path of
`gem-extract/metadata.gz-extract/metadata.gz-extract`,
then `gem-extract/` should be returned.
"""
rounds = 0
while rounds < levels_up:
resource = resource.parent(codebase)
if not resource:
return
rounds += 1
return resource
def find_root_from_paths(paths, resource, codebase):
"""
Return the resource for the root directory of this filesystem or None, given
a ``resource`` in ``codebase`` with a list of possible resource root-
relative ``paths`` (e.g. extending from the root directory we are looking
for).
"""
for path in paths:
if not resource.path.endswith(path):
continue
return find_root_resource(path=path, resource=resource, codebase=codebase)
def find_root_resource(path, resource, codebase):
"""
Return the resource for the root directory of this filesystem or None, given
a ``resource`` in ``codebase`` with a possible resource root-relative
``path`` (e.g. extending from the root directory we are looking for).
"""
if not resource.path.endswith(path):
return
for _seg in path.split('/'):
resource = resource.parent(codebase)
if not resource:
return
return resource
def yield_dependencies_from_package_data(package_data, datafile_path, package_uid):
"""
Yield a Dependency for each dependency from ``package_data.dependencies``
"""
from packagedcode import models
dependent_packages = package_data.dependencies
if dependent_packages:
yield from models.Dependency.from_dependent_packages(
dependent_packages=dependent_packages,
datafile_path=datafile_path,
datasource_id=package_data.datasource_id,
package_uid=package_uid,
)
def yield_dependencies_from_package_resource(resource, package_uid=None):
"""
Yield a Dependency for each dependency from each package from``resource.package_data``
"""
from packagedcode import models
for pkg_data in resource.package_data:
pkg_data = models.PackageData.from_dict(pkg_data)
yield from yield_dependencies_from_package_data(pkg_data, resource.path, package_uid)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/utils.py
| 0.509276 | 0.394551 |
utils.py
|
pypi
|
import io
import json
from commoncode import datautils
from packagedcode import models
import attr
from packageurl import PackageURL
"""
Handle Godeps-like Go package dependency data.
Note: there are other dependency tools for Go beside Godeps, yet several use the
same format. Godeps (and glide, etc.) is mostly legacy today and replaced by Go
modules.
"""
# FIXME: update to use the latest vendor conventions.
# consider other legacy format?
# https://github.com/golang/dep/blob/master/Gopkg.lock
# https://github.com/golang/dep/blob/master/Gopkg.toml
class GodepsHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'godeps'
default_package_type = 'golang'
default_primary_language = 'Go'
path_patterns = ('*/Godeps.json',)
description = 'Go Godeps'
documentation_url = 'https://github.com/tools/godep'
@classmethod
def parse(cls, location):
godeps = Godep(location)
if godeps.import_path:
# we create a purl from the import path to parse ns/name nicely
purl = PackageURL.from_string(f'pkg:golang/{godeps.import_path}')
namespace = purl.namespace
name = purl.name
else:
namespace = None
name = None
dependencies = []
deps = godeps.dependencies or []
for dep in deps:
dependencies.append(
models.DependentPackage(
purl=str(PackageURL.from_string(f'pkg:golang/{dep.import_path}')),
extracted_requirement=dep.revision,
scope='Deps',
is_runtime=True,
is_optional=False,
is_resolved=False,
)
)
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
namespace=namespace,
name=name,
primary_language=cls.default_primary_language,
dependencies=dependencies,
)
@classmethod
def assign_package_to_resources(cls, package, resource, codebase, package_adder):
models.DatafileHandler.assign_package_to_parent_tree(package, resource, codebase, package_adder)
@attr.s
class Dep:
import_path = datautils.String()
revision = datautils.String()
comment = datautils.String()
def to_dict(self):
return attr.asdict(self)
# map of Godep names to our own attribute names
NAMES = {
'ImportPath': 'import_path',
'GoVersion': 'go_version',
'Packages': 'packages',
'Deps': 'dependencies',
'Comment': 'comment',
'Rev': 'revision',
}
@attr.s
class Godep:
"""
Represent JSON dep file with this structure:
type Godeps struct {
ImportPath string
GoVersion string // Abridged output of 'go version'.
Packages []string // Arguments to godep save, if any.
Deps []struct {
ImportPath string
Comment string // Description of commit, if present.
Rev string // VCS-specific commit ID.
}
}
ImportPath
GoVersion
Packages
Deps
ImportPath
Comment
Rev
"""
location = datautils.String()
import_path = datautils.String()
go_version = datautils.String()
packages = datautils.List(item_type=str)
dependencies = datautils.List(item_type=Dep)
def __attrs_post_init__(self, *args, **kwargs):
if self.location:
self.load(self.location)
def load(self, location):
"""
Load self from a location string or a file-like object containing a
Godeps JSON.
"""
with io.open(location, encoding='utf-8') as godep:
text = godep.read()
return self.loads(text)
def loads(self, text):
"""
Load a Godeps JSON text.
"""
data = json.loads(text)
for key, value in data.items():
name = NAMES.get(key)
if name == 'dependencies':
self.dependencies = self.parse_deps(value)
else:
setattr(self, name, value)
return self
def parse_deps(self, deps):
"""
Return a list of Dep from a ``deps`` list of dependency mappings.
"""
deps_list = []
for dep in deps:
data = dict((NAMES[key], value) for key, value in dep.items())
deps_list.append(Dep(**data))
return deps_list or []
def to_dict(self):
return {
'import_path': self.import_path,
'go_version': self.go_version,
'packages': self.packages,
'dependencies': [d.to_dict() for d in self.dependencies],
}
def parse(location):
"""
Return a mapping of parsed Godeps from the file at `location`.
"""
return Godep(location).to_dict()
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/godeps.py
| 0.538741 | 0.242441 |
godeps.py
|
pypi
|
import warnings
import saneyaml
from packageurl import PackageURL
from packagedcode import models
"""
Collect data from Dart pub packages.
See https://dart.dev/tools/pub/pubspec
API has theses URLs:
is limited and only returns all versions of a package
- feeds https://pub.dev/feed.atom
- all packages, paginated: https://pub.dev/api/packages
- one package, all version: https://pub.dev/api/packages/painter
- one version: https://pub.dev/api/packages/painter/versions/0.3.1
See https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md
"""
# FIXME: warnings reported here DO NOT work. We should have a better way
class BaseDartPubspecHandler(models.DatafileHandler):
@classmethod
def assemble(cls, package_data, resource, codebase, package_adder):
datafile_name_patterns = \
DartPubspecYamlHandler.path_patterns + DartPubspecLockHandler.path_patterns
if resource.has_parent():
dir_resource = resource.parent(codebase)
else:
dir_resource = resource
yield from cls.assemble_from_many_datafiles(
datafile_name_patterns=datafile_name_patterns,
directory=dir_resource,
codebase=codebase,
package_adder=package_adder,
)
class DartPubspecYamlHandler(BaseDartPubspecHandler):
datasource_id = 'pubspec_yaml'
path_patterns = ('*pubspec.yaml',)
default_package_type = 'pubspec'
default_primary_language = 'dart'
description = 'Dart pubspec manifest'
documentation_url = 'https://dart.dev/tools/pub/pubspec'
@classmethod
def parse(cls, location):
with open(location) as inp:
pubspec_data = saneyaml.load(inp.read())
package_data = build_package(pubspec_data)
if package_data:
yield package_data
class DartPubspecLockHandler(BaseDartPubspecHandler):
datasource_id = 'pubspec_lock'
path_patterns = ('*pubspec.lock',)
default_package_type = 'pubspec'
default_primary_language = 'dart'
description = 'Dart pubspec lockfile'
documentation_url = 'https://web.archive.org/web/20220330081004/https://gpalma.pt/blog/what-is-the-pubspec-lock/'
@classmethod
def parse(cls, location):
with open(location) as inp:
locks_data = saneyaml.load(inp.read())
dependencies = list(collect_locks(locks_data))
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
primary_language=cls.default_primary_language,
dependencies=dependencies
)
def collect_locks(locks_data):
"""
Yield DependentPackage from locks data
The general form is
packages:
_fe_analyzer_shared:
dependency: transitive
description:
name: _fe_analyzer_shared
url: "https://pub.dartlang.org"
source: hosted
version: "22.0.0"
sdks:
dart: ">=2.12.0 <3.0.0"
"""
# FIXME: we treat all as nno optioanl for now
sdks = locks_data.get('sdks') or {}
for name, version in sdks.items():
dep = build_dep(
name,
version,
scope='sdk',
is_runtime=True,
is_optional=False,
)
yield dep
packages = locks_data.get('packages') or {}
for name, details in packages.items():
version = details.get('version')
# FIXME: see https://github.com/dart-lang/pub/blob/2a08832e0b997ff92de65571b6d79a9b9099faa0/lib/src/lock_file.dart#L344
# transitive, direct main, direct dev, direct overridden.
# they do not map exactly to the pubspec scopes since transitive can be
# either main or dev
scope = details.get('dependency')
if scope == 'direct dev':
is_runtime = False
else:
is_runtime = True
desc = details.get('description') or {}
known_desc = isinstance(desc, dict)
# issue a warning for unknown data structure
warn = False
if not known_desc:
if not (isinstance(desc, str) and desc == 'flutter'):
warn = True
else:
dname = desc.get('name')
durl = desc.get('url')
dsource = details.get('source')
if (
(dname and dname != name)
or (durl and durl != 'https://pub.dartlang.org')
or (dsource and dsource not in ['hosted', 'sdk', ])
):
warn = True
if warn:
warnings.warn(
f'Dart pubspec.locks with unsupported external repo '
f'description or source: {details}',
stacklevel=1,
)
dep = build_dep(
name,
version,
scope=scope,
is_runtime=is_runtime,
is_optional=False,
)
yield dep
def collect_deps(data, dependency_field_name, is_runtime=True, is_optional=False):
"""
Yield DependentPackage found in the ``dependency_field_name`` of ``data``.
Use is_runtime and is_optional in created DependentPackage.
The shape of the data is:
dependencies:
path: 1.7.0
meta: ^1.2.4
yaml: ^3.1.0
environment:
sdk: '>=2.12.0 <3.0.0'
"""
# TODO: these can be more complex for SDKs
# https://dart.dev/tools/pub/dependencies#dependency-sources
dependencies = data.get(dependency_field_name) or {}
for name, version in dependencies.items():
dep = build_dep(
name,
version,
scope=dependency_field_name,
is_runtime=is_runtime,
is_optional=is_optional,
)
yield dep
def build_dep(name, version, scope, is_runtime=True, is_optional=False):
"""
Return DependentPackage from the provided data.
"""
# TODO: these can be more complex for SDKs
# https://dart.dev/tools/pub/dependencies#dependency-sources
if isinstance(version, dict) and 'sdk' in version:
# {'sdk': 'flutter'} type of deps....
# which is a wart that we keep as a requiremnet
version = ', '.join(': '.join([k, str(v)]) for k, v in version.items())
if version.replace('.', '').isdigit():
# version is pinned exactly if it is only made of dots and digits
purl = PackageURL(
type='pubspec',
name=name,
version=version,
)
is_resolved = True
else:
purl = PackageURL(
type='pubspec',
name=name,
)
is_resolved = False
dep = models.DependentPackage(
purl=purl.to_string(),
extracted_requirement=version,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional,
is_resolved=is_resolved,
)
return dep
def build_package(pubspec_data):
"""
Return a package object from a package data mapping or None
"""
name = pubspec_data.get('name')
version = pubspec_data.get('version')
description = pubspec_data.get('description')
homepage_url = pubspec_data.get('homepage')
extracted_license_statement = pubspec_data.get('license')
vcs_url = pubspec_data.get('repository')
download_url = pubspec_data.get('archive_url')
api_data_url = name and version and f'https://pub.dev/api/packages/{name}/versions/{version}'
repository_homepage_url = name and version and f'https://pub.dev/packages/{name}/versions/{version}'
# A URL should be in the form of:
# https://pub.dartlang.org/packages/url_launcher/versions/6.0.9.tar.gz
# And it may resolve to:
# https://storage.googleapis.com/pub-packages/packages/http-0.13.2.tar.gz
# as seen in the pub.dev web pages
repository_download_url = name and version and f'https://pub.dartlang.org/packages/{name}/versions/{version}.tar.gz'
download_url = download_url or repository_download_url
# Author and authors are deprecated
authors = []
author = pubspec_data.get('author')
if author:
authors.append(author)
authors.extend(pubspec_data.get('authors') or [])
parties = []
for auth in authors:
parties.append(models.Party(
type=models.party_person,
role='author',
name=auth
))
package_dependencies = []
dependencies = collect_deps(
pubspec_data,
'dependencies',
is_runtime=True,
is_optional=False,
)
package_dependencies.extend(dependencies)
dev_dependencies = collect_deps(
pubspec_data,
'dev_dependencies',
is_runtime=False,
is_optional=True,
)
package_dependencies.extend(dev_dependencies)
env_dependencies = collect_deps(
pubspec_data,
'environment',
is_runtime=True,
is_optional=False,
)
package_dependencies.extend(env_dependencies)
extra_data = {}
def add_to_extra_if_present(_key):
_value = pubspec_data.get(_key)
if _value:
extra_data[_key] = _value
add_to_extra_if_present('issue_tracker')
add_to_extra_if_present('documentation')
add_to_extra_if_present('dependencies_overrides')
add_to_extra_if_present('executables')
add_to_extra_if_present('publish_to')
return models.PackageData(
datasource_id=DartPubspecYamlHandler.datasource_id,
type=DartPubspecYamlHandler.default_primary_language,
primary_language=DartPubspecYamlHandler.default_primary_language,
name=name,
version=version,
download_url=download_url,
vcs_url=vcs_url,
description=description,
extracted_license_statement=extracted_license_statement,
parties=parties,
homepage_url=homepage_url,
dependencies=package_dependencies,
extra_data=extra_data,
repository_homepage_url=repository_homepage_url,
api_data_url=api_data_url,
repository_download_url=repository_download_url,
)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/packagedcode/pubspec.py
| 0.525612 | 0.209834 |
pubspec.py
|
pypi
|
import re
from collections import defaultdict
from binascii import crc32
from itertools import islice
from licensedcode.stopwords import STOPWORDS
from textcode.analysis import numbered_text_lines
"""
Utilities to break texts in lines and tokens (aka. words) with specialized
version for queries and rules texts.
"""
def query_lines(
location=None,
query_string=None,
strip=True,
start_line=1,
plain_text=False,
):
"""
Return an iterable of tuples (line number, text line) given a file at
`location` or a `query string`. Include empty lines.
Line numbers start at ``start_line`` which is 1-based by default.
If `plain_text` is True treat the file as a plain text file and do not
attempt to detect its type and extract its content with special procedures.
This is used mostly when loading license texts and rules.
"""
# TODO: OPTIMIZE: tokenizing line by line may be rather slow
# we could instead get lines and tokens at once in a batch?
numbered_lines = []
if location:
numbered_lines = numbered_text_lines(
location,
demarkup=False,
start_line=start_line,
plain_text=plain_text,
)
elif query_string:
if strip:
keepends = False
else:
keepends = True
numbered_lines = enumerate(
query_string.splitlines(keepends),
start_line,
)
for line_number, line in numbered_lines:
if strip:
yield line_number, line.strip()
else:
yield line_number, line.rstrip('\n') + '\n'
# Split on whitespace and punctuations: keep only characters and numbers and +
# when in the middle or end of a word. Keeping the trailing + is important for
# licenses name such as GPL2+. The use a double negation "not non word" meaning
# "words" to define the character ranges
query_pattern = '[^_\\W]+\\+?[^_\\W]*'
word_splitter = re.compile(query_pattern, re.UNICODE).findall
key_phrase_pattern = '(?:' + query_pattern + '|\\{\\{|\\}\\})'
key_phrase_splitter = re.compile(key_phrase_pattern, re.UNICODE).findall
KEY_PHRASE_OPEN = '{{'
KEY_PHRASE_CLOSE = '}}'
# FIXME: this should be folded in a single pass tokenization with the index_tokenizer
def key_phrase_tokenizer(text, stopwords=STOPWORDS):
"""
Yield tokens from a rule ``text`` including key phrases {{brace}} markers.
This tokenizer behaves the same as as the ``index_tokenizer`` returning also
KEY_PHRASE_OPEN and KEY_PHRASE_CLOSE as separate tokens so that they can be
used to parse key phrases.
>>> x = list(key_phrase_splitter('{{AGPL-3.0 GNU Affero License v3.0}}'))
>>> assert x == ['{{', 'AGPL', '3', '0', 'GNU', 'Affero', 'License', 'v3', '0', '}}'], x
>>> x = list(key_phrase_splitter('{{{AGPL{{{{Affero }}License}}0}}'))
>>> assert x == ['{{', 'AGPL', '{{', '{{', 'Affero', '}}', 'License', '}}', '0', '}}'], x
>>> list(index_tokenizer('')) == []
True
>>> x = list(index_tokenizer('{{AGPL-3.0 GNU Affero License v3.0}}'))
>>> assert x == ['agpl', '3', '0', 'gnu', 'affero', 'license', 'v3', '0']
>>> x = list(key_phrase_tokenizer('{{AGPL-3.0 GNU Affero License v3.0}}'))
>>> assert x == ['{{', 'agpl', '3', '0', 'gnu', 'affero', 'license', 'v3', '0', '}}']
"""
if not text:
return
for token in key_phrase_splitter(text.lower()):
if token and token not in stopwords:
yield token
def index_tokenizer(text, stopwords=STOPWORDS):
"""
Return an iterable of tokens from a rule or query ``text`` using index
tokenizing rules. Ignore words that exist as lowercase in the ``stopwords``
set.
For example::
>>> list(index_tokenizer(''))
[]
>>> x = list(index_tokenizer('some Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(index_tokenizer('{{}some }}Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(index_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
>>> assert x == ['hi', 'some', 'text', 'with', 'noth+', 'ing', 'junk', 'spaces']
>>> stops = set(['quot', 'lt', 'gt'])
>>> x = list(index_tokenizer('some "< markup >"', stopwords=stops))
>>> assert x == ['some', 'markup']
"""
if not text:
return []
words = word_splitter(text.lower())
return (token for token in words if token and token not in stopwords)
def index_tokenizer_with_stopwords(text, stopwords=STOPWORDS):
"""
Return a tuple of (tokens, stopwords_by_pos) for a rule
``text`` using index tokenizing rules where tokens is a list of tokens and
stopwords_by_pos is a mapping of {pos: stops count} where "pos" is a token
position and "stops count" is the number of stopword tokens after this
position if any. For stopwords at the start, the position is using the magic
-1 key. Use the lowercase ``stopwords`` set.
For example::
>>> toks, stops = index_tokenizer_with_stopwords('')
>>> assert toks == [], (toks, stops)
>>> assert stops == {}
>>> toks, stops = index_tokenizer_with_stopwords('some Text with spAces! + _ -')
>>> assert toks == ['some', 'text', 'with', 'spaces'], (toks, stops)
>>> assert stops == {}
>>> toks, stops = index_tokenizer_with_stopwords('{{}some }}Text with spAces! + _ -')
>>> assert toks == ['some', 'text', 'with', 'spaces'], (toks, stops)
>>> assert stops == {}
>>> toks, stops = index_tokenizer_with_stopwords('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}')
>>> assert toks == ['hi', 'some', 'text', 'with', 'noth+', 'ing', 'junk', 'spaces'], (toks, stops)
>>> assert stops == {}
>>> stops = set(['quot', 'lt', 'gt'])
>>> toks, stops = index_tokenizer_with_stopwords('some "< markup >"', stopwords=stops)
>>> assert toks == ['some', 'markup'], (toks, stops)
>>> assert stops == {0: 2, 1: 2}
>>> toks, stops = index_tokenizer_with_stopwords('{{g', stopwords=stops)
>>> assert toks == ['g'], (toks, stops)
>>> assert stops == {}
"""
if not text:
return [], {}
tokens = []
tokens_append = tokens.append
# we use a defaultdict as a convenience at construction time
# TODO: use the actual words and not just a count
stopwords_by_pos = defaultdict(int)
pos = -1
for token in word_splitter(text.lower()):
if token:
if token in stopwords:
# If we have not yet started, then all tokens seen so far
# are stopwords and we keep a count of them in the magic
# "-1" position.
stopwords_by_pos[pos] += 1
else:
pos += 1
tokens_append(token)
return tokens, dict(stopwords_by_pos)
def query_tokenizer(text):
"""
Return an iterable of tokens from a unicode query text. Do not ignore stop
words. They are handled at a later stage in a query.
For example::
>>> list(query_tokenizer(''))
[]
>>> x = list(query_tokenizer('some Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(query_tokenizer('{{}some }}Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(query_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
>>> assert x == ['hi', 'some', 'text', 'with', 'noth+', 'ing', 'junk', 'spaces']
"""
if not text:
return []
words = word_splitter(text.lower())
return (token for token in words if token)
# Alternate pattern which is the opposite of query_pattern used for
# matched text collection
not_query_pattern = '[_\\W\\s\\+]+[_\\W\\s]?'
# collect tokens and non-token texts in two different groups
_text_capture_pattern = (
'(?P<token>' +
query_pattern +
')' +
'|' +
'(?P<punct>' +
not_query_pattern +
')'
)
tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer
def matched_query_text_tokenizer(text):
"""
Return an iterable of tokens and non-tokens punctuation from a unicode query
text keeping everything (including punctuations, line endings, etc.)
The returned iterable contains 2-tuples of:
- True if the string is a text token or False if this is not
(such as punctuation, spaces, etc).
- the corresponding string.
This is used to reconstruct the matched query text for reporting.
"""
if not text:
return
for match in tokens_and_non_tokens(text):
if match:
mgd = match.groupdict()
token = mgd.get('token')
punct = mgd.get('punct')
if token:
yield True, token
elif punct:
yield False, punct
else:
# this should never happen
raise Exception('Internal error in matched_query_text_tokenizer')
def ngrams(iterable, ngram_length):
"""
Return an iterable of ngrams of length `ngram_length` given an `iterable`.
Each ngram is a tuple of `ngram_length` items.
The returned iterable is empty if the input iterable contains less than
`ngram_length` items.
Note: this is a fairly arcane but optimized way to compute ngrams.
For example:
>>> list(ngrams([1,2,3,4,5], 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 4))
[(1, 2, 3, 4), (2, 3, 4, 5)]
>>> list(ngrams([1,2,3,4], 2))
[(1, 2), (2, 3), (3, 4)]
>>> list(ngrams([1,2,3], 2))
[(1, 2), (2, 3)]
>>> list(ngrams([1,2], 2))
[(1, 2)]
>>> list(ngrams([1], 2))
[]
This also works with arrays or tuples:
>>> from array import array
>>> list(ngrams(array('h', [1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams(tuple([1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
"""
return zip(*(islice(iterable, i, None) for i in range(ngram_length)))
def select_ngrams(ngrams, with_pos=False):
"""
Return an iterable as a subset of a sequence of ngrams using the hailstorm
algorithm. If `with_pos` is True also include the starting position for the
ngram in the original sequence.
Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf
The algorithm first fingerprints every token and then selects a shingle s
if the minimum fingerprint value of all k tokens in s occurs at the first
or the last position of s (and potentially also in between). Due to the
probabilistic properties of Rabin fingerprints the probability that a
shingle is chosen is 2/k if all tokens in the shingle are different.
For example:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]
Positions can also be included. In this case, tuple of (pos, ngram) are returned:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)], with_pos=True))
[(0, (2, 1, 3)), (1, (1, 1, 3)), (2, (5, 1, 3)), (3, (2, 6, 1)), (4, (7, 3, 4))]
This works also from a generator:
>>> list(select_ngrams(x for x in [(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]
"""
last = None
for pos, ngram in enumerate(ngrams):
# FIXME: use a proper hash
nghs = []
for ng in ngram:
if isinstance(ng, str):
ng = bytearray(ng, encoding='utf-8')
else:
ng = bytearray(str(ng).encode('utf-8'))
nghs.append(crc32(ng) & 0xffffffff)
min_hash = min(nghs)
if with_pos:
ngram = (pos, ngram,)
if min_hash in (nghs[0], nghs[-1]):
yield ngram
last = ngram
else:
# always yield the first or last ngram too.
if pos == 0:
yield ngram
last = ngram
if last != ngram:
yield ngram
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/tokenize.py
| 0.533884 | 0.333856 |
tokenize.py
|
pypi
|
ScanCode license detection overview and key design elements
===========================================================
License detection is about finding common texts between the text of a query file
being scanned and the texts of the indexed license texts and rule texts. The process
strives to be correct first and fast second.
Ideally we want to find the best alignment possible between two texts so we know
exactly where they match: the scanned text and one or more of the many license texts.
We settle for good alignments rather than optimal alignments by still returning
accurate and correct matches in a reasonable amount of time.
Correctness is essential but efficiency too: both in terms of speed and memory usage.
One key to efficient matching is to process not characters but whole words and use
internally not strings but integers to represent a word.
Rules and licenses
------------------
The detection uses an index of reference license texts and a set of "rules" that are
common notices or mentions of these licenses. The things that makes detection
sometimes difficult is that a license reference can be very short as in "this is GPL"
or very long as a full license text for the GPLv3. To cope with this we use different
matching strategies and also compute the resemblance and containment of texts that
are matched.
Words as integers
-----------------
A dictionary mapping words to a unique integer is used to transform a scanned text
"query" words and reference indexed license texts and rules words to numbers.
This is possible because we have a limited number of words across all the license
texts (about 15K). We further assign these ids to words such that very common words
have a low id and less common, more discriminant words have a higher id. And define a
thresholds for this ids range such that very common words below that threshold cannot
possible form a license text or mention together.
Once that mapping is applied, the detection then only deal with integers in two
dimensions:
- the token ids (and whether they are in the high or low range).
- their positions in the query (qpos) and the indexed rule (ipos).
We also use an integer id for a rule.
All operations are from then on dealing with list, arrays or sets of integers in
defined ranges.
Matches are reduced to sets of integers we call "Spans":
- matched positions on the query side
- matched positions on the index side
By using integers in known ranges throughout, several operations are reduced to
integer and integer sets or lists comparisons and intersection. These operations
are faster and more readily optimizable.
With integers, we also use less memory:
- we can use arrays of unsigned 16 bits ints that store each number on two bytes
rather than bigger lists of ints.
- we can replace dictionaries by sparse lists or arrays where the index is an integer key.
- we can use succinct, bit level representations (e.g. bitmaps) of integer sets.
Smaller data structures also means faster processing as the processors need to move
less data in memory.
With integers we can also be faster:
- a dict key lookup is slower than a list of array index lookup.
- processing large list of small structures is faster (such as bitmaps, etc).
- we can leverage libraries that speed up integer set operations.
Common/junk tokens
------------------
The quality and speed of detection is supported by classifying each word as either
good/discriminant or common/junk. Junk tokens are either very frequent of tokens that
taken together together cannot form some valid license mention or notice. When a
numeric id is assigned to a token during initial indexing, junk tokens are assigned a
lower id than good tokens. These are then called low or junk tokens and high or good
tokens.
Query processing
----------------
When a file is scanned, it is first converted to a query object which is a list of
integer token ids. A query is further broken down in slices (a.k.a. query runs) based
on heuristics.
While the query is processed a set of matched and matchable positions for for high
and low token ids is kept to track what is left to do in matching.
Matching pipeline
-----------------
The matching pipeline consist of:
- we start with matching the whole query at once against hashes on the whole text
looked up agains a mapping of hash to license rule. We exit if we have a match.
- then we match the whole query for exact matches using an automaton (Aho-Corasick).
We exit if we have a match.
- then each query run is processed in sequence:
- the best potentially matching rules are found with two rounds of approximate
"set" matching. This set matching uses a "bag of words" approach where the
scanned text is transformed in a vector of integers based on the presence or
absence of a word. It is compared against the index of vectors. This is similar
conceptually to a traditional inverted index search for information retrieval.
The best matches are ranked using a resemblance and containment comparison. A
second round is performed on the best matches using multisets which are set where
the number of occurrence of each word is also taken into account. The best matches
are ranked again using a resemblance and containment comparison and is more
accurate than the previous set matching.
- using the ranked potential candidate matches from the two previous rounds, we
then perform a pair-wise local sequence alignment between these candidates and
the query run. This sequence alignment is essentially an optimized diff working
on integer sequences and takes advantage of the fact that some very frequent
words are considered less discriminant: this speeds up the sequence alignment
significantly. The number of multiple local sequence alignments that are required
in this step is also made much smaller by the pre-matching done using sets.
- finally all the collected matches are merged, refined and filtered to yield the
final results. The merging considers the ressemblance, containment and overlap
between scanned texts and the matched texts and several secondary factors.
Filtering is based on the density and length of matches as well as the number of
good or frequent tokens matched.
Last, each match receives a score which is the based on the length of the rule text
and how of this rule text was matched. Optionally we can also collect the exact
matched texts and which part was not match for each match.
Comparison with other tools approaches
--------------------------------------
Most tools use regular expressions. The problem is that creating these expressions
requires a lot of intimate knowledge of the data set and the relation between each
license texts. The maintenance effort is high. And regex matches typically need a
complex second pass of disambiguation for similar matches.
Some tools use an index of pre-defined sentences and match these as regex and then
reassemble possible matches. They tend to suffer from the same issues as a pure regex
based approach and require an intimate knowledge of the license texts and how they
relate to each other.
Some tools use pair-wise comparisons like ScanCode. But in doing so they usually
perform poorly because a multiple local sequence alignment is an expensisve
computation. Say you scan 1000 files and you have 1000 reference texts. You would
need to recursively make multiple times 1000 comparisons with each scanned file very
quickly performing the equivalent 100 million diffs or more to process these files.
Because of the progressive matching pipeline used in ScanCode, sequence alignments
may not be needed at all in the common cases and when they are, only a few are
needed.
See also this list: https://wiki.debian.org/CopyrightReviewTools
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/README.rst
| 0.932699 | 0.982288 |
README.rst
|
pypi
|
from collections.abc import Set
from itertools import count
from itertools import groupby
from intbitset import intbitset
"""
Ranges and intervals of integers using bitmaps.
Used as a compact and faster data structure for token and position sets.
"""
class Span(Set):
"""
Represent ranges of integers (such as tokens positions) as a set of integers.
A Span is hashable and not meant to be modified once created, like a frozenset.
It is equivalent to a sparse closed interval.
Originally derived and heavily modified from Whoosh Span.
"""
def __init__(self, *args):
"""
Create a new Span from a start and end ints or an iterable of ints.
First form:
Span(start int, end int) : the span is initialized with a range(start, end+1)
Second form:
Span(iterable of ints) : the span is initialized with the iterable
Spans are hashable and immutable.
For example:
>>> s = Span(1)
>>> s.start
1
>>> s = Span([1, 2])
>>> s.start
1
>>> s.end
2
>>> s
Span(1, 2)
>>> s = Span(1, 3)
>>> s.start
1
>>> s.end
3
>>> s
Span(1, 3)
>>> s = Span([6, 5, 1, 2])
>>> s.start
1
>>> s.end
6
>>> s
Span(1, 2)|Span(5, 6)
>>> len(s)
4
>>> Span([5, 6, 7, 8, 9, 10 ,11, 12]) == Span([5, 6, 7, 8, 9, 10 ,11, 12])
True
>>> hash(Span([5, 6, 7, 8, 9, 10 ,11, 12])) == hash(Span([5, 6, 7, 8, 9, 10 ,11, 12]))
True
>>> hash(Span([5, 6, 7, 8, 9, 10 ,11, 12])) == hash(Span(5, 12))
True
"""
len_args = len(args)
if len_args == 0:
self._set = intbitset()
elif len_args == 1:
# args0 is a single int or an iterable of ints
if isinstance(args[0], int):
self._set = intbitset(args)
else:
# some sequence or iterable
self._set = intbitset(list(args[0]))
elif len_args == 2:
# args0 and args1 describe a start and end closed range
self._set = intbitset(range(args[0], args[1] + 1))
else:
# args0 is a single int or args is an iterable of ints
# args is an iterable of ints
self._set = intbitset(list(args))
@classmethod
def _from_iterable(cls, it):
return cls(list(it))
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._set)
def __hash__(self):
return hash(tuple(self._set))
def __eq__(self, other):
return isinstance(other, Span) and self._set == other._set
def __and__(self, *others):
return Span(self._set.intersection(*[o._set for o in others]))
def __or__(self, *others):
return Span(self._set.union(*[o._set for o in others]))
def union(self, *others):
"""
Return the union of this span with other spans as a new span.
(i.e. all positions that are in either spans.)
"""
return self.__or__(*others)
def difference(self, *others):
"""
Return the difference of two or more spans as a new span.
(i.e. all positions that are in this span but not the others.)
"""
return Span(self._set.difference(*[o._set for o in others]))
def __repr__(self):
"""
Return a brief representation of this span by only listing contiguous
spans and not all items.
For example:
>>> Span([1, 2, 3, 4, 5, 7, 8, 9, 10])
Span(1, 5)|Span(7, 10)
"""
subspans_repr = []
for subs in self.subspans():
ls = len(subs)
if not ls:
subspans_repr.append('Span()')
elif ls == 1:
subspans_repr.append('Span(%d)' % subs.start)
else:
subspans_repr.append('Span(%d, %d)' % (subs.start, subs.end))
return '|'.join(subspans_repr)
def __contains__(self, other):
"""
Return True if this span contains other span (where other is a Span, an
int or an ints set).
For example:
>>> Span([5, 7]) in Span(5, 7)
True
>>> Span([5, 8]) in Span([5, 7])
False
>>> 6 in Span([4, 5, 6, 7, 8])
True
>>> 2 in Span([4, 5, 6, 7, 8])
False
>>> 8 in Span([4, 8])
True
>>> 5 in Span([4, 8])
False
>>> set([4, 5]) in Span([4, 5, 6, 7, 8])
True
>>> set([9]) in Span([4, 8])
False
"""
if isinstance(other, Span):
return self._set.issuperset(other._set)
if isinstance(other, int):
return self._set.__contains__(other)
if isinstance(other, (set, frozenset)):
return self._set.issuperset(intbitset(other))
if isinstance(other, intbitset):
return self._set.issuperset(other)
@property
def set(self):
return self._set
def issubset(self, other):
return self._set.issubset(other._set)
def issuperset(self, other):
return self._set.issuperset(other._set)
@property
def start(self):
if not self._set:
raise TypeError('Empty Span has no start.')
return self._set[0]
@property
def end(self):
if not self._set:
raise TypeError('Empty Span has no end.')
return self._set[-1]
@classmethod
def sort(cls, spans):
"""
Return a new sorted sequence of spans given a sequence of spans.
The primary sort is on start. The secondary sort is on length.
If two spans have the same start, the longer span will sort first.
For example:
>>> spans = [Span([5, 6, 7, 8, 9, 10]), Span([1, 2]), Span([3, 4, 5]), Span([3, 4, 5, 6]), Span([8, 9, 10])]
>>> Span.sort(spans)
[Span(1, 2), Span(3, 6), Span(3, 5), Span(5, 10), Span(8, 10)]
>>> spans = [Span([1, 2]), Span([3, 4, 5]), Span([3, 4, 5, 6]), Span([8, 9, 10])]
>>> Span.sort(spans)
[Span(1, 2), Span(3, 6), Span(3, 5), Span(8, 10)]
>>> spans = [Span([1, 2]), Span([4, 5]), Span([7, 8]), Span([11, 12])]
>>> Span.sort(spans)
[Span(1, 2), Span(4, 5), Span(7, 8), Span(11, 12)]
>>> spans = [Span([1, 2]), Span([7, 8]), Span([5, 6]), Span([12, 13])]
>>> Span.sort(spans)
[Span(1, 2), Span(5, 6), Span(7, 8), Span(12, 13)]
"""
key = lambda s: (s.start, -len(s),)
return sorted(spans, key=key)
def magnitude(self):
"""
Return the maximal length represented by this span start and end. The
magnitude is the same as the length for a contiguous span. It will be
greater than the length for a span with non-contiguous int items.
An empty span has a zero magnitude.
For example:
>>> Span([4, 8]).magnitude()
5
>>> len(Span([4, 8]))
2
>>> len(Span([4, 5, 6, 7, 8]))
5
>>> Span([4, 5, 6, 14 , 12, 128]).magnitude()
125
>>> Span([4, 5, 6, 7, 8]).magnitude()
5
>>> Span([0]).magnitude()
1
>>> Span([0]).magnitude()
1
"""
if not self._set:
return 0
return self.end - self.start + 1
def density(self):
"""
Return the density of this span as a ratio of its length to its
magnitude, a float between 0 and 1. A dense Span has all its integer
items contiguous and a maximum density of one. A sparse low density span
has some non-contiguous integer items. An empty span has a zero density.
For example:
>>> Span([4, 8]).density()
0.4
>>> Span([4, 5, 6, 7, 8]).density()
1.0
>>> Span([0]).density()
1.0
>>> Span().density()
0
"""
if not self._set:
return 0
return len(self) / self.magnitude()
def overlap(self, other):
"""
Return the count of overlapping items between this span and other span.
For example:
>>> Span([1, 2]).overlap(Span([5, 6]))
0
>>> Span([5, 6]).overlap(Span([5, 6]))
2
>>> Span([4, 5, 6, 7]).overlap(Span([5, 6]))
2
>>> Span([4, 5, 6]).overlap(Span([5, 6, 7]))
2
>>> Span([4, 5, 6]).overlap(Span([6]))
1
>>> Span([4, 5]).overlap(Span([6, 7]))
0
"""
return len(self & other)
def resemblance(self, other):
"""
Return a resemblance coefficient as a float between 0 and 1.
0 means the spans are completely different and 1 identical.
"""
if self._set.isdisjoint(other._set):
return 0
if self._set == other._set:
return 1
resemblance = self.overlap(other) / len(self | other)
return resemblance
def containment(self, other):
"""
Return a containment coefficient as a float between 0 and 1. This is an
indication of how much of the other span is contained in this span.
- 1 means the other span is entirely contained in this span.
- 0 means that the other span is not contained at all this span.
"""
if self._set.isdisjoint(other._set):
return 0
if self._set == other._set:
return 1
containment = self.overlap(other) / len(other)
return containment
def surround(self, other):
"""
Return True if this span surrounds other span.
This is different from containment. A span can surround another span region
and have no positions in common with the surrounded.
For example:
>>> Span([4, 8]).surround(Span([4, 8]))
True
>>> Span([3, 9]).surround(Span([4, 8]))
True
>>> Span([5, 8]).surround(Span([4, 8]))
False
>>> Span([4, 7]).surround(Span([4, 8]))
False
>>> Span([4, 5, 6, 7, 8]).surround(Span([5, 6, 7]))
True
"""
return self.start <= other.start and self.end >= other.end
def is_before(self, other):
return self.end < other.start
def is_after(self, other):
return self.start > other.end
def touch(self, other):
"""
Return True if self sequence is contiguous with other span without overlap.
For example:
>>> Span([5, 7]).touch(Span([5]))
False
>>> Span([5, 7]).touch(Span([5, 8]))
False
>>> Span([5, 7]).touch(Span([7, 8]))
False
>>> Span([5, 7]).touch(Span([8, 9]))
True
>>> Span([8, 9]).touch(Span([5, 7]))
True
"""
return self.start == other.end + 1 or self.end == other.start - 1
def distance_to(self, other):
"""
Return the absolute positive distance from this span to other span.
Overlapping spans have a zero distance.
Non-overlapping touching spans have a distance of one.
For example:
>>> Span([8, 9]).distance_to(Span([5, 7]))
1
>>> Span([5, 7]).distance_to(Span([8, 9]))
1
>>> Span([5, 6]).distance_to(Span([8, 9]))
2
>>> Span([8, 9]).distance_to(Span([5, 6]))
2
>>> Span([5, 7]).distance_to(Span([5, 7]))
0
>>> Span([4, 5, 6]).distance_to(Span([5, 6, 7]))
0
>>> Span([5, 7]).distance_to(Span([10, 12]))
3
>>> Span([1, 2]).distance_to(Span(range(4, 52)))
2
"""
if self.overlap(other):
return 0
if self.touch(other):
return 1
if self.is_before(other):
return other.start - self.end
else:
return self.start - other.end
@staticmethod
def from_ints(ints):
"""
Return a sequence of Spans from an iterable of ints. A new Span is
created for each group of monotonously increasing int items.
>>> Span.from_ints([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
[Span(1, 12)]
>>> Span.from_ints([1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12])
[Span(1, 3), Span(5, 12)]
>>> Span.from_ints([0, 2, 3, 5, 6, 7, 8, 9, 10, 11, 13])
[Span(0), Span(2, 3), Span(5, 11), Span(13)]
"""
ints = sorted(set(ints))
groups = (group for _, group in groupby(ints, lambda group, c=count(): next(c) - group))
return [Span(g) for g in groups]
def subspans(self):
"""
Return a list of Spans creating one new Span for each set of contiguous
integer items.
For example:
>>> span = Span(5, 6, 7, 8, 9, 10) | Span([1, 2]) | Span(3, 5) | Span(3, 6) | Span([8, 9, 10])
>>> span.subspans()
[Span(1, 10)]
When subspans are not touching they do not merge :
>>> span = Span([63, 64]) | Span([58, 58])
>>> span.subspans()
[Span(58), Span(63, 64)]
Overlapping subspans are merged as needed:
>>> span = Span([12, 17, 24]) | Span([15, 16, 17, 35]) | Span(58) | Span(63, 64)
>>> span.subspans()
[Span(12), Span(15, 17), Span(24), Span(35), Span(58), Span(63, 64)]
"""
return Span.from_ints(self)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/spans.py
| 0.91611 | 0.491334 |
spans.py
|
pypi
|
import os
import json
import pathlib
from datetime import datetime
from os.path import dirname
from os.path import join
from distutils.dir_util import copy_tree
import click
import saneyaml
from commoncode.cliutils import MISC_GROUP
from commoncode.cliutils import PluggableCommandLineOption
from jinja2 import Environment, FileSystemLoader
from licensedcode.models import load_licenses
from licensedcode.models import licenses_data_dir
from scancode_config import __version__ as scancode_version
from scancode_config import spdx_license_list_version
TEMPLATES_DIR = os.path.join(dirname(__file__), 'templates')
STATIC_DIR = os.path.join(dirname(__file__), 'static')
def write_file(path, filename, content):
path.joinpath(filename).open("w").write(content)
def now():
return datetime.utcnow().strftime('%Y-%m-%d')
base_context = {
"scancode_version": scancode_version,
"now": now(),
"spdx_license_list_version": spdx_license_list_version,
}
base_context_test = {
"scancode_version": "32.0.0b1",
"now": "Dec 22, 2022",
"spdx_license_list_version": "3.20",
}
def generate_indexes(output_path, environment, licenses, test=False):
"""
Generates the license index and the static website at ``output_path``.
``environment`` is a jinja Environment object used to generate the webpage
and ``licenses`` is a mapping with scancode license data.
"""
if test:
base_context_mapping = base_context_test
else:
base_context_mapping = base_context
static_dest_dir = join(output_path, 'static')
if not os.path.exists(static_dest_dir):
os.makedirs(static_dest_dir)
copy_tree(STATIC_DIR, static_dest_dir)
license_list_template = environment.get_template("license_list.html")
index_html = license_list_template.render(
**base_context_mapping,
licenses=licenses,
)
write_file(output_path, "index.html", index_html)
index = [
{
"license_key": key,
"category": lic.category,
"spdx_license_key": lic.spdx_license_key,
"other_spdx_license_keys": lic.other_spdx_license_keys,
"is_exception": lic.is_exception,
"is_deprecated": lic.is_deprecated,
"json": f"{key}.json",
"yaml": f"{key}.yml",
"html": f"{key}.html",
"license": f"{key}.LICENSE",
}
for key, lic in licenses.items()
]
write_file(
output_path,
"index.json",
json.dumps(index, indent=2, sort_keys=False)
)
write_file(
output_path,
"index.yml",
saneyaml.dump(index, indent=2)
)
return len(index)
def generate_details(output_path, environment, licenses, test=False):
"""
Dumps data at ``output_path`` in JSON, YAML and HTML formats and also dumps
the .LICENSE file with the license text and the data as YAML frontmatter.
``environment`` is a jinja Environment object used to generate the webpage
and ``licenses`` is a mapping with scancode license data.
``test`` is to generate a stable output for testing only
"""
from licensedcode.cache import get_cache
include_builtin = get_cache().has_additional_licenses
if test:
base_context_mapping = base_context_test
else:
base_context_mapping = base_context
license_details_template = environment.get_template("license_details.html")
for lic in licenses.values():
license_data = lic.to_dict(include_text=False, include_builtin=include_builtin)
license_data_with_text = lic.to_dict(include_text=True, include_builtin=include_builtin)
html = license_details_template.render(
**base_context_mapping,
license=lic,
license_data=license_data,
)
write_file(output_path, f"{lic.key}.html", html)
write_file(
output_path,
f"{lic.key}.yml",
saneyaml.dump(license_data_with_text, indent=2)
)
write_file(
output_path,
f"{lic.key}.json",
json.dumps(license_data_with_text, indent=2, sort_keys=False)
)
lic.dump(output_path)
def generate_help(output_path, environment, test=False):
"""
Generate a help.html with help text at ``output_path`` ``environment`` is a
jinja Environment object used to generate the webpage. ``test`` is to
generate a stable output for testing only
"""
if test:
base_context_mapping = base_context_test
else:
base_context_mapping = base_context
template = environment.get_template("help.html")
html = template.render(**base_context_mapping)
write_file(output_path, "help.html", html)
def generate(
build_location,
template_dir=TEMPLATES_DIR,
licenses_data_dir=licenses_data_dir,
test=False,
):
"""
Generate a licenseDB static website and dump license data at
``build_location`` given a license directory ``licenses_data_dir`` using
templates from ``template_dir``. ``test`` is to generate a stable output for
testing only
"""
if not os.path.exists(build_location):
os.makedirs(build_location)
env = Environment(
loader=FileSystemLoader(template_dir),
autoescape=True,
)
licenses = dict(sorted(
load_licenses(licenses_data_dir=licenses_data_dir, with_deprecated=True).items()
))
root_path = pathlib.Path(build_location)
root_path.mkdir(parents=False, exist_ok=True)
count = generate_indexes(output_path=root_path, environment=env, licenses=licenses, test=test)
generate_details(output_path=root_path, environment=env, licenses=licenses, test=test)
generate_help(output_path=root_path, environment=env, test=test)
return count
def scancode_license_data(path):
"""
Dump license data from scancode licenses to the directory ``path`` passed
in from command line.
Dumps data in JSON, YAML and HTML formats and also dumps the .LICENSE file
with the license text and the data as YAML frontmatter.
"""
click.secho(f'Dumping license data to: {path}', err=True)
count = generate(build_location=path)
click.secho(f'Done dumping #{count} licenses.', err=True)
@click.command(name='scancode-license-data')
@click.option(
'--path',
type=click.Path(exists=False, writable=True, file_okay=False, resolve_path=True, path_type=str),
metavar='DIR',
help='Dump the license data in this directory in the LicenseDB format and exit. '
'Creates the directory if it does not exist. ',
help_group=MISC_GROUP,
cls=PluggableCommandLineOption,
)
@click.help_option('-h', '--help')
def dump_scancode_license_data(
path,
*args,
**kwargs,
):
"""
Dump scancode license data in various formats, and the licenseDB static website at `path`.
"""
scancode_license_data(path=path)
if __name__ == '__main__':
dump_scancode_license_data()
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/license_db.py
| 0.499756 | 0.176175 |
license_db.py
|
pypi
|
from collections import namedtuple
import sys
import time
"""
Computes the difference between two texts. Originally based on
Diff Match and Patch
Copyright 2018 The diff-match-patch Authors.
original author [email protected] (Neil Fraser)
https://github.com/google/diff-match-patch
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Changes
2019-05-14: This file has been substantially modified.
All non-diff code has been removed.
Most methods have been moved to plain functions
A new difflib-like match_blocks function has been added
that works from sequences of ints.
"""
TRACE = False
def logger_debug(*args): pass
if TRACE:
import logging
logger = logging.getLogger(__name__)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
Match = namedtuple('Match', 'a b size')
def match_blocks(a, b, a_start, a_end, *args, **kwargs):
"""
Return a list of matching block Match triples describing matching
subsequences of `a` in `b` starting from the `a_start` position in `a` up to
the `a_end` position in `a`.
"""
if TRACE:
logger_debug('a_start', a_start, 'a_end', a_end)
# convert sequences to strings
text1 = int2unicode(a[a_start:a_end])
text2 = int2unicode(b)
df = Differ(timeout=0.01)
diffs = df.difference(text1, text2)
diffs = trim(diffs)
apos = a_start
bpos = 0
matches = []
for op, matched_text in diffs:
size = len(matched_text)
if not size:
continue
if op == DIFF_EQUAL:
matches.append(Match(apos, bpos, size))
apos += size
bpos += size
elif op == DIFF_INSERT:
bpos += size
elif op == DIFF_DELETE:
apos += size
return matches
def int2unicode(nums):
"""
Convert an array of positive integers to a unicode string.
"""
return u''.join(chr(i + 1) for i in nums)
def trim(diffs):
"""
Remove trailing INSERT and DELETE from a list of diffs
"""
# FIXME: this may be best done in the main loop?
while diffs:
op, _ = diffs[-1]
if op in (DIFF_DELETE, DIFF_INSERT):
diffs.pop()
else:
break
return diffs
class Differ(object):
def __init__(self, timeout=0.1):
# Number of seconds to compute a diff before giving up (0 for infinity).
self.timeout = timeout
def difference(self, text1, text2, deadline=None):
"""
Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set timeout instead.
Returns:
Array of changes.
"""
if text1 == None or text2 == None:
raise ValueError('Illegal empty inputs')
# Check for equality (speedup).
if text1 == text2:
if text1:
return [(DIFF_EQUAL, text1)]
return []
# Set a deadline by which time the diff must be complete.
if deadline == None:
# Unlike in most languages, Python counts time in seconds.
if not self.timeout:
deadline = sys.maxsize
else:
deadline = time.time() + self.timeout
# Trim off common prefix (speedup).
commonlength = common_prefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
# Trim off common suffix (speedup).
commonlength = common_suffix(text1, text2)
if commonlength == 0:
commonsuffix = ''
else:
commonsuffix = text1[-commonlength:]
text1 = text1[:-commonlength]
text2 = text2[:-commonlength]
# Compute the diff on the middle block.
diffs = self.compute(text1, text2, deadline)
# Restore the prefix and suffix.
if commonprefix:
diffs[:0] = [(DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((DIFF_EQUAL, commonsuffix))
diffs = merge(diffs)
return diffs
def compute(self, text1, text2, deadline):
"""
Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(DIFF_DELETE, text1)]
len_text1 = len(text1)
len_text2 = len(text2)
reversed_diff = len_text1 > len_text2
if reversed_diff:
longtext, shorttext = text1, text2
len_shorttext = len_text2
else:
shorttext, longtext = text1, text2
len_shorttext = len_text1
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(DIFF_INSERT, longtext[:i]),
(DIFF_EQUAL, shorttext),
(DIFF_INSERT, longtext[i + len_shorttext:])]
# Swap insertions for deletions if diff is reversed.
if reversed_diff:
diffs[0] = (DIFF_DELETE, diffs[0][1])
diffs[2] = (DIFF_DELETE, diffs[2][1])
return diffs
if len_shorttext == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(DIFF_DELETE, text1), (DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = half_match(text1, text2, len_text1, len_text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.difference(text1_a, text2_a, deadline)
diffs_b = self.difference(text1_b, text2_b, deadline)
# Merge the results.
return diffs_a + [(DIFF_EQUAL, mid_common)] + diffs_b
return self.bisect(text1, text2, deadline, len_text1, len_text2)
def bisect(self, text1, text2, deadline, len_text1, len_text2):
"""
Find the 'middle snake' of a diff, split the problem in two
and return the recursively constructed diff.
See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
max_d = (len_text1 + len_text2 + 1) // 2
v_offset = max_d
v_length = 2 * max_d
v1 = [-1] * v_length
v1[v_offset + 1] = 0
v2 = v1[:]
delta = len_text1 - len_text2
# If the total number of characters is odd, then the front path will
# collide with the reverse path.
front = (delta % 2 != 0)
# Offsets for start and end of k loop.
# Prevents mapping of space beyond the grid.
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in range(max_d):
# Bail out if deadline is reached.
if time.time() > deadline:
break
# Walk the front path one step.
for k1 in range(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < len_text1 and y1 < len_text2 and text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > len_text1:
# Ran off the right of the graph.
k1end += 2
elif y1 > len_text2:
# Ran off the bottom of the graph.
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
# Mirror x2 onto top-left coordinate system.
x2 = len_text1 - v2[k2_offset]
if x1 >= x2:
# Overlap detected.
return self.bisect_split(text1, text2, x1, y1, deadline)
# Walk the reverse path one step.
for k2 in range(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < len_text1 and y2 < len_text2 and text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > len_text1:
# Ran off the left of the graph.
k2end += 2
elif y2 > len_text2:
# Ran off the top of the graph.
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
# Mirror x2 onto top-left coordinate system.
x2 = len_text1 - x2
if x1 >= x2:
# Overlap detected.
return self.bisect_split(text1, text2, x1, y1, deadline)
# Diff took too long and hit the deadline or
# number of diffs equals number of characters, no commonality at all.
return [(DIFF_DELETE, text1), (DIFF_INSERT, text2)]
def bisect_split(self, text1, text2, x, y, deadline):
"""
Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.difference(text1a, text2a, deadline)
diffsb = self.difference(text1b, text2b, deadline)
return diffs + diffsb
def half_match(text1, text2, len_text1, len_text2):
"""
Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
"""
reversed_diff = len_text1 > len_text2
if reversed_diff:
longtext, shorttext = text1, text2
len_longtext, len_shorttext = len_text1, len_text2
else:
shorttext, longtext = text1, text2
len_shorttext, len_longtext = len_text1, len_text2
if len_longtext < 4 or len_shorttext * 2 < len_longtext:
# Pointless.
return None
# First check if the second quarter is the seed for a half-match.
hm1 = half_match_i(longtext, shorttext, (len_longtext + 3) // 4, len_longtext)
# Check again based on the third quarter.
hm2 = half_match_i(longtext, shorttext, (len_longtext + 1) // 2, len_longtext)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if reversed_diff:
text1_a, text1_b, text2_a, text2_b, mid_common = hm
else:
text2_a, text2_b, text1_a, text1_b, mid_common = hm
return text1_a, text1_b, text2_a, text2_b, mid_common
def half_match_i(longtext, shorttext, i, len_longtext):
"""
Does a substring of shorttext exist within longtext such that the substring
is at least half the length of longtext?
Args:
longtext: Longer string.
shorttext: Shorter string.
i: Start index of quarter length substring within longtext.
Returns:
Five element Array, containing:
- the prefix of longtext,
- the suffix of longtext,
- the prefix of shorttext,
- the suffix of shorttext
- the common middle.
Or None if there was no match.
"""
seed = longtext[i:i + len_longtext // 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = common_prefix(longtext[i:], shorttext[j:])
suffixLength = common_suffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] + shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len_longtext:
return (
best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b,
best_common)
def cleanup_efficiency(diffs, editcost=4):
"""
Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
# Stack of indices where equalities are found.
equalities = []
# Always equal to diffs[equalities[-1]][1]
last_equality = None
# Index of current position.
pointer = 0
# Is there an insertion operation before the last equality.
pre_ins = False
# Is there a deletion operation before the last equality.
pre_del = False
# Is there an insertion operation after the last equality.
post_ins = False
# Is there a deletion operation after the last equality.
post_del = False
while pointer < len(diffs):
if diffs[pointer][0] == DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < editcost and (post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
last_equality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
last_equality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
# <ins>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if last_equality and (
(pre_ins and pre_del and post_ins and post_del)
or
((len(last_equality) < editcost / 2)
and (pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (DIFF_DELETE, last_equality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (DIFF_INSERT, diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
last_equality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if equalities:
# Throw away the previous equality.
equalities.pop()
if equalities:
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
diffs = merge(diffs)
return diffs
def common_prefix(text1, text2):
"""
Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
# TODO: move as args
len_text1 = len(text1)
len_text2 = len(text2)
pointermax = min(len_text1, len_text2)
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def common_suffix(text1, text2):
"""
Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
# TODO: move as args
len_text1 = len(text1)
len_text2 = len(text2)
pointermax = min(len_text1, len_text2)
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len_text1 - pointerend] == text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def merge(diffs):
"""
Reorder and merge like edit sections in place. Merge equalities.
Any edit section can move as long as it doesn't cross an equality.
Return the merged diffs sequence.
Args:
diffs: Array of diff tuples.
"""
diffs.append((DIFF_EQUAL, '')) # Add a dummy entry at the end.
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete + count_insert > 1:
if count_delete != 0 and count_insert != 0:
# Factor out any common prefixies.
commonlength = common_prefix(text_insert, text_delete)
if commonlength != 0:
x = pointer - count_delete - count_insert - 1
if x >= 0 and diffs[x][0] == DIFF_EQUAL:
diffs[x] = (
diffs[x][0],
diffs[x][1] + text_insert[:commonlength])
else:
diffs.insert(0, (DIFF_EQUAL, text_insert[:commonlength]))
pointer += 1
text_insert = text_insert[commonlength:]
text_delete = text_delete[commonlength:]
# Factor out any common suffixies.
commonlength = common_suffix(text_insert, text_delete)
if commonlength != 0:
diffs[pointer] = (
diffs[pointer][0],
text_insert[-commonlength:] + diffs[pointer][1])
text_insert = text_insert[:-commonlength]
text_delete = text_delete[:-commonlength]
# Delete the offending records and add the merged ones.
new_ops = []
if len(text_delete) != 0:
new_ops.append((DIFF_DELETE, text_delete))
if len(text_insert) != 0:
new_ops.append((DIFF_INSERT, text_insert))
pointer -= count_delete + count_insert
diffs[pointer:pointer + count_delete + count_insert] = new_ops
pointer += len(new_ops) + 1
elif pointer != 0 and diffs[pointer - 1][0] == DIFF_EQUAL:
# Merge this equality with the previous one.
diffs[pointer - 1] = (
diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer][1])
del diffs[pointer]
else:
pointer += 1
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
if diffs[-1][1] == '':
diffs.pop() # Remove the dummy entry at the end.
# Second pass: look for single edits surrounded on both sides by equalities
# which can be shifted sideways to eliminate an equality.
# e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
changes = False
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == DIFF_EQUAL and diffs[pointer + 1][0] == DIFF_EQUAL):
# This is a single edit surrounded by equalities.
if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
# Shift the edit over the previous equality.
if diffs[pointer - 1][1] != "":
diffs[pointer] = (
diffs[pointer][0],
diffs[pointer - 1][1] + diffs[pointer][1][:-len(diffs[pointer - 1][1])])
diffs[pointer + 1] = (
diffs[pointer + 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
del diffs[pointer - 1]
changes = True
elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
# Shift the edit over the next equality.
diffs[pointer - 1] = (
diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
diffs[pointer] = (
diffs[pointer][0],
diffs[pointer][1][len(diffs[pointer + 1][1]):] + diffs[pointer + 1][1])
del diffs[pointer + 1]
changes = True
pointer += 1
# If shifts were made, the diff needs reordering and another shift sweep.
if changes:
diffs = merge(diffs)
return diffs
def levenshtein_distance(diffs):
"""
Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == DIFF_INSERT:
insertions += len(data)
elif op == DIFF_DELETE:
deletions += len(data)
elif op == DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/dmp.py
| 0.721056 | 0.490724 |
dmp.py
|
pypi
|
from time import time
import sys
from licensedcode.match import LicenseMatch
from licensedcode.spans import Span
TRACE = False
TRACE2 = False
TRACE3 = False
def logger_debug(*args): pass
if TRACE or TRACE2 or TRACE3:
use_print = True
if use_print:
prn = print
else:
import logging
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
prn = logger.debug
def logger_debug(*args):
return prn(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Matching strategy using pair-wise multiple local sequences alignment and diff-
like approaches.
"""
MATCH_SEQ = '3-seq'
def match_sequence(idx, rule, query_run, high_postings, start_offset=0,
match_blocks=None, deadline=sys.maxsize):
"""
Return a list of LicenseMatch by matching the `query_run` tokens sequence
starting at `start_offset` against the `idx` index for the candidate `rule`.
Stop processing when reachin the deadline time.
"""
if not rule:
return []
if not match_blocks:
from licensedcode.seq import match_blocks
rid = rule.rid
itokens = idx.tids_by_rid[rid]
len_legalese = idx.len_legalese
qbegin = query_run.start + start_offset
qfinish = query_run.end
qtokens = query_run.query.tokens
query = query_run.query
matches = []
qstart = qbegin
# match as long as long we find alignments and have high matchable tokens
# this allows to find repeated instances of the same rule in the query run
while qstart <= qfinish:
if TRACE2:
logger_debug('\n\nmatch_seq:==========================LOOP=============================')
if not query_run.is_matchable(include_low=False):
break
if TRACE2:
logger_debug('match_seq:running block_matches:', 'a_start:', qstart, 'a_end', qfinish + 1)
block_matches = match_blocks(
a=qtokens, b=itokens, a_start=qstart, a_end=qfinish + 1,
b2j=high_postings, len_good=len_legalese,
matchables=query_run.matchables)
if not block_matches:
break
# create one match for each matching block: they will be further merged
# at LicenseMatch merging and filtering time
for qpos, ipos, mlen in block_matches:
qspan_end = qpos + mlen
# skip single non-high word matched as as sequence
if mlen > 1 or (mlen == 1 and qtokens[qpos] < len_legalese):
qspan = Span(range(qpos, qspan_end))
ispan = Span(range(ipos, ipos + mlen))
hispan = Span(p for p in ispan if itokens[p] < len_legalese)
match = LicenseMatch(
rule, qspan, ispan, hispan, qbegin,
matcher=MATCH_SEQ, query=query)
matches.append(match)
if TRACE2:
from licensedcode.tracing import get_texts
qt, it = get_texts(match)
logger_debug('###########################')
logger_debug(match)
logger_debug('###########################')
logger_debug(qt)
logger_debug('###########################')
logger_debug(it)
logger_debug('###########################')
qstart = max([qstart, qspan_end])
if time() > deadline:
break
if time() > deadline:
break
if TRACE:
logger_debug('match_seq: FINAL LicenseMatch(es)')
for m in matches:
logger_debug(m)
logger_debug('\n\n')
return matches
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/match_seq.py
| 0.510008 | 0.242878 |
match_seq.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from ._compat import PY2
from ._make import NOTHING, Factory, pipe
if not PY2:
import inspect
import typing
__all__ = [
"default_if_none",
"optional",
"pipe",
"to_bool",
]
def optional(converter):
"""
A converter that allows an attribute to be optional. An optional attribute
is one which can be set to ``None``.
Type annotations will be inferred from the wrapped converter's, if it
has any.
:param callable converter: the converter that is used for non-``None``
values.
.. versionadded:: 17.1.0
"""
def optional_converter(val):
if val is None:
return None
return converter(val)
if not PY2:
sig = None
try:
sig = inspect.signature(converter)
except (ValueError, TypeError): # inspect failed
pass
if sig:
params = list(sig.parameters.values())
if params and params[0].annotation is not inspect.Parameter.empty:
optional_converter.__annotations__["val"] = typing.Optional[
params[0].annotation
]
if sig.return_annotation is not inspect.Signature.empty:
optional_converter.__annotations__["return"] = typing.Optional[
sig.return_annotation
]
return optional_converter
def default_if_none(default=NOTHING, factory=None):
"""
A converter that allows to replace ``None`` values by *default* or the
result of *factory*.
:param default: Value to be used if ``None`` is passed. Passing an instance
of `attrs.Factory` is supported, however the ``takes_self`` option
is *not*.
:param callable factory: A callable that takes no parameters whose result
is used if ``None`` is passed.
:raises TypeError: If **neither** *default* or *factory* is passed.
:raises TypeError: If **both** *default* and *factory* are passed.
:raises ValueError: If an instance of `attrs.Factory` is passed with
``takes_self=True``.
.. versionadded:: 18.2.0
"""
if default is NOTHING and factory is None:
raise TypeError("Must pass either `default` or `factory`.")
if default is not NOTHING and factory is not None:
raise TypeError(
"Must pass either `default` or `factory` but not both."
)
if factory is not None:
default = Factory(factory)
if isinstance(default, Factory):
if default.takes_self:
raise ValueError(
"`takes_self` is not supported by default_if_none."
)
def default_if_none_converter(val):
if val is not None:
return val
return default.factory()
else:
def default_if_none_converter(val):
if val is not None:
return val
return default
return default_if_none_converter
def to_bool(val):
"""
Convert "boolean" strings (e.g., from env. vars.) to real booleans.
Values mapping to :code:`True`:
- :code:`True`
- :code:`"true"` / :code:`"t"`
- :code:`"yes"` / :code:`"y"`
- :code:`"on"`
- :code:`"1"`
- :code:`1`
Values mapping to :code:`False`:
- :code:`False`
- :code:`"false"` / :code:`"f"`
- :code:`"no"` / :code:`"n"`
- :code:`"off"`
- :code:`"0"`
- :code:`0`
:raises ValueError: for any other value.
.. versionadded:: 21.3.0
"""
if isinstance(val, str):
val = val.lower()
truthy = {True, "true", "t", "yes", "y", "on", "1", 1}
falsy = {False, "false", "f", "no", "n", "off", "0", 0}
try:
if val in truthy:
return True
if val in falsy:
return False
except TypeError:
# Raised when "val" is not hashable (e.g., lists)
pass
raise ValueError("Cannot convert value to bool: {}".format(val))
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/_vendor/attr/converters.py
| 0.850142 | 0.249207 |
converters.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import operator
import re
from contextlib import contextmanager
from ._config import get_run_validators, set_run_validators
from ._make import _AndValidator, and_, attrib, attrs
from .exceptions import NotCallableError
try:
Pattern = re.Pattern
except AttributeError: # Python <3.7 lacks a Pattern type.
Pattern = type(re.compile(""))
__all__ = [
"and_",
"deep_iterable",
"deep_mapping",
"disabled",
"ge",
"get_disabled",
"gt",
"in_",
"instance_of",
"is_callable",
"le",
"lt",
"matches_re",
"max_len",
"optional",
"provides",
"set_disabled",
]
def set_disabled(disabled):
"""
Globally disable or enable running validators.
By default, they are run.
:param disabled: If ``True``, disable running all validators.
:type disabled: bool
.. warning::
This function is not thread-safe!
.. versionadded:: 21.3.0
"""
set_run_validators(not disabled)
def get_disabled():
"""
Return a bool indicating whether validators are currently disabled or not.
:return: ``True`` if validators are currently disabled.
:rtype: bool
.. versionadded:: 21.3.0
"""
return not get_run_validators()
@contextmanager
def disabled():
"""
Context manager that disables running validators within its context.
.. warning::
This context manager is not thread-safe!
.. versionadded:: 21.3.0
"""
set_run_validators(False)
try:
yield
finally:
set_run_validators(True)
@attrs(repr=False, slots=True, hash=True)
class _InstanceOfValidator(object):
type = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not isinstance(value, self.type):
raise TypeError(
"'{name}' must be {type!r} (got {value!r} that is a "
"{actual!r}).".format(
name=attr.name,
type=self.type,
actual=value.__class__,
value=value,
),
attr,
self.type,
value,
)
def __repr__(self):
return "<instance_of validator for type {type!r}>".format(
type=self.type
)
def instance_of(type):
"""
A validator that raises a `TypeError` if the initializer is called
with a wrong type for this particular attribute (checks are performed using
`isinstance` therefore it's also valid to pass a tuple of types).
:param type: The type to check for.
:type type: type or tuple of types
:raises TypeError: With a human readable error message, the attribute
(of type `attrs.Attribute`), the expected type, and the value it
got.
"""
return _InstanceOfValidator(type)
@attrs(repr=False, frozen=True, slots=True)
class _MatchesReValidator(object):
pattern = attrib()
match_func = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.match_func(value):
raise ValueError(
"'{name}' must match regex {pattern!r}"
" ({value!r} doesn't)".format(
name=attr.name, pattern=self.pattern.pattern, value=value
),
attr,
self.pattern,
value,
)
def __repr__(self):
return "<matches_re validator for pattern {pattern!r}>".format(
pattern=self.pattern
)
def matches_re(regex, flags=0, func=None):
r"""
A validator that raises `ValueError` if the initializer is called
with a string that doesn't match *regex*.
:param regex: a regex string or precompiled pattern to match against
:param int flags: flags that will be passed to the underlying re function
(default 0)
:param callable func: which underlying `re` function to call (options
are `re.fullmatch`, `re.search`, `re.match`, default
is ``None`` which means either `re.fullmatch` or an emulation of
it on Python 2). For performance reasons, they won't be used directly
but on a pre-`re.compile`\ ed pattern.
.. versionadded:: 19.2.0
.. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
"""
fullmatch = getattr(re, "fullmatch", None)
valid_funcs = (fullmatch, None, re.search, re.match)
if func not in valid_funcs:
raise ValueError(
"'func' must be one of {}.".format(
", ".join(
sorted(
e and e.__name__ or "None" for e in set(valid_funcs)
)
)
)
)
if isinstance(regex, Pattern):
if flags:
raise TypeError(
"'flags' can only be used with a string pattern; "
"pass flags to re.compile() instead"
)
pattern = regex
else:
pattern = re.compile(regex, flags)
if func is re.match:
match_func = pattern.match
elif func is re.search:
match_func = pattern.search
elif fullmatch:
match_func = pattern.fullmatch
else: # Python 2 fullmatch emulation (https://bugs.python.org/issue16203)
pattern = re.compile(
r"(?:{})\Z".format(pattern.pattern), pattern.flags
)
match_func = pattern.match
return _MatchesReValidator(pattern, match_func)
@attrs(repr=False, slots=True, hash=True)
class _ProvidesValidator(object):
interface = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.interface.providedBy(value):
raise TypeError(
"'{name}' must provide {interface!r} which {value!r} "
"doesn't.".format(
name=attr.name, interface=self.interface, value=value
),
attr,
self.interface,
value,
)
def __repr__(self):
return "<provides validator for interface {interface!r}>".format(
interface=self.interface
)
def provides(interface):
"""
A validator that raises a `TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
performed using ``interface.providedBy(value)`` (see `zope.interface
<https://zopeinterface.readthedocs.io/en/latest/>`_).
:param interface: The interface to check for.
:type interface: ``zope.interface.Interface``
:raises TypeError: With a human readable error message, the attribute
(of type `attrs.Attribute`), the expected interface, and the
value it got.
"""
return _ProvidesValidator(interface)
@attrs(repr=False, slots=True, hash=True)
class _OptionalValidator(object):
validator = attrib()
def __call__(self, inst, attr, value):
if value is None:
return
self.validator(inst, attr, value)
def __repr__(self):
return "<optional validator for {what} or None>".format(
what=repr(self.validator)
)
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or `list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators.
"""
if isinstance(validator, list):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator)
@attrs(repr=False, slots=True, hash=True)
class _InValidator(object):
options = attrib()
def __call__(self, inst, attr, value):
try:
in_options = value in self.options
except TypeError: # e.g. `1 in "abc"`
in_options = False
if not in_options:
raise ValueError(
"'{name}' must be in {options!r} (got {value!r})".format(
name=attr.name, options=self.options, value=value
)
)
def __repr__(self):
return "<in_ validator with options {options!r}>".format(
options=self.options
)
def in_(options):
"""
A validator that raises a `ValueError` if the initializer is called
with a value that does not belong in the options provided. The check is
performed using ``value in options``.
:param options: Allowed options.
:type options: list, tuple, `enum.Enum`, ...
:raises ValueError: With a human readable error message, the attribute (of
type `attrs.Attribute`), the expected options, and the value it
got.
.. versionadded:: 17.1.0
"""
return _InValidator(options)
@attrs(repr=False, slots=False, hash=True)
class _IsCallableValidator(object):
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not callable(value):
message = (
"'{name}' must be callable "
"(got {value!r} that is a {actual!r})."
)
raise NotCallableError(
msg=message.format(
name=attr.name, value=value, actual=value.__class__
),
value=value,
)
def __repr__(self):
return "<is_callable validator>"
def is_callable():
"""
A validator that raises a `attr.exceptions.NotCallableError` if the
initializer is called with a value for this particular attribute
that is not callable.
.. versionadded:: 19.1.0
:raises `attr.exceptions.NotCallableError`: With a human readable error
message containing the attribute (`attrs.Attribute`) name,
and the value it got.
"""
return _IsCallableValidator()
@attrs(repr=False, slots=True, hash=True)
class _DeepIterable(object):
member_validator = attrib(validator=is_callable())
iterable_validator = attrib(
default=None, validator=optional(is_callable())
)
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.iterable_validator is not None:
self.iterable_validator(inst, attr, value)
for member in value:
self.member_validator(inst, attr, member)
def __repr__(self):
iterable_identifier = (
""
if self.iterable_validator is None
else " {iterable!r}".format(iterable=self.iterable_validator)
)
return (
"<deep_iterable validator for{iterable_identifier}"
" iterables of {member!r}>"
).format(
iterable_identifier=iterable_identifier,
member=self.member_validator,
)
def deep_iterable(member_validator, iterable_validator=None):
"""
A validator that performs deep validation of an iterable.
:param member_validator: Validator to apply to iterable members
:param iterable_validator: Validator to apply to iterable itself
(optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, hash=True)
class _DeepMapping(object):
key_validator = attrib(validator=is_callable())
value_validator = attrib(validator=is_callable())
mapping_validator = attrib(default=None, validator=optional(is_callable()))
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.mapping_validator is not None:
self.mapping_validator(inst, attr, value)
for key in value:
self.key_validator(inst, attr, key)
self.value_validator(inst, attr, value[key])
def __repr__(self):
return (
"<deep_mapping validator for objects mapping {key!r} to {value!r}>"
).format(key=self.key_validator, value=self.value_validator)
def deep_mapping(key_validator, value_validator, mapping_validator=None):
"""
A validator that performs deep validation of a dictionary.
:param key_validator: Validator to apply to dictionary keys
:param value_validator: Validator to apply to dictionary values
:param mapping_validator: Validator to apply to top-level mapping
attribute (optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepMapping(key_validator, value_validator, mapping_validator)
@attrs(repr=False, frozen=True, slots=True)
class _NumberValidator(object):
bound = attrib()
compare_op = attrib()
compare_func = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.compare_func(value, self.bound):
raise ValueError(
"'{name}' must be {op} {bound}: {value}".format(
name=attr.name,
op=self.compare_op,
bound=self.bound,
value=value,
)
)
def __repr__(self):
return "<Validator for x {op} {bound}>".format(
op=self.compare_op, bound=self.bound
)
def lt(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number larger or equal to *val*.
:param val: Exclusive upper bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, "<", operator.lt)
def le(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number greater than *val*.
:param val: Inclusive upper bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, "<=", operator.le)
def ge(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number smaller than *val*.
:param val: Inclusive lower bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, ">=", operator.ge)
def gt(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number smaller or equal to *val*.
:param val: Exclusive lower bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, ">", operator.gt)
@attrs(repr=False, frozen=True, slots=True)
class _MaxLengthValidator(object):
max_length = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if len(value) > self.max_length:
raise ValueError(
"Length of '{name}' must be <= {max}: {len}".format(
name=attr.name, max=self.max_length, len=len(value)
)
)
def __repr__(self):
return "<max_len validator for {max}>".format(max=self.max_length)
def max_len(length):
"""
A validator that raises `ValueError` if the initializer is called
with a string or iterable that is longer than *length*.
:param int length: Maximum length of the string or iterable
.. versionadded:: 21.3.0
"""
return _MaxLengthValidator(length)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/_vendor/attr/validators.py
| 0.844569 | 0.261016 |
validators.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from functools import total_ordering
from ._funcs import astuple
from ._make import attrib, attrs
@total_ordering
@attrs(eq=False, order=False, slots=True, frozen=True)
class VersionInfo(object):
"""
A version object that can be compared to tuple of length 1--4:
>>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
True
>>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
True
>>> vi = attr.VersionInfo(19, 2, 0, "final")
>>> vi < (19, 1, 1)
False
>>> vi < (19,)
False
>>> vi == (19, 2,)
True
>>> vi == (19, 2, 1)
False
.. versionadded:: 19.2
"""
year = attrib(type=int)
minor = attrib(type=int)
micro = attrib(type=int)
releaselevel = attrib(type=str)
@classmethod
def _from_version_string(cls, s):
"""
Parse *s* and return a _VersionInfo.
"""
v = s.split(".")
if len(v) == 3:
v.append("final")
return cls(
year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
)
def _ensure_tuple(self, other):
"""
Ensure *other* is a tuple of a valid length.
Returns a possibly transformed *other* and ourselves as a tuple of
the same length as *other*.
"""
if self.__class__ is other.__class__:
other = astuple(other)
if not isinstance(other, tuple):
raise NotImplementedError
if not (1 <= len(other) <= 4):
raise NotImplementedError
return astuple(self)[: len(other)], other
def __eq__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
return us == them
def __lt__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
# Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
# have to do anything special with releaselevel for now.
return us < them
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/_vendor/attr/_version_info.py
| 0.805517 | 0.209389 |
_version_info.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import copy
from ._compat import iteritems
from ._make import NOTHING, _obj_setattr, fields
from .exceptions import AttrsAttributeNotFoundError
def asdict(
inst,
recurse=True,
filter=None,
dict_factory=dict,
retain_collection_types=False,
value_serializer=None,
):
"""
Return the ``attrs`` attribute values of *inst* as a dict.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attrs.Attribute` as the first argument and the
value as the second argument.
:param callable dict_factory: A callable to produce dictionaries from. For
example, to produce ordered dictionaries instead of normal Python
dictionaries, pass in ``collections.OrderedDict``.
:param bool retain_collection_types: Do not convert to ``list`` when
encountering an attribute whose type is ``tuple`` or ``set``. Only
meaningful if ``recurse`` is ``True``.
:param Optional[callable] value_serializer: A hook that is called for every
attribute or dict key/value. It receives the current instance, field
and value and must return the (updated) value. The hook is run *after*
the optional *filter* has been applied.
:rtype: return type of *dict_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.0.0 *dict_factory*
.. versionadded:: 16.1.0 *retain_collection_types*
.. versionadded:: 20.3.0 *value_serializer*
.. versionadded:: 21.3.0 If a dict has a collection for a key, it is
serialized as a tuple.
"""
attrs = fields(inst.__class__)
rv = dict_factory()
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if value_serializer is not None:
v = value_serializer(inst, a, v)
if recurse is True:
if has(v.__class__):
rv[a.name] = asdict(
v,
recurse=True,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
elif isinstance(v, (tuple, list, set, frozenset)):
cf = v.__class__ if retain_collection_types is True else list
rv[a.name] = cf(
[
_asdict_anything(
i,
is_key=False,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
for i in v
]
)
elif isinstance(v, dict):
df = dict_factory
rv[a.name] = df(
(
_asdict_anything(
kk,
is_key=True,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
_asdict_anything(
vv,
is_key=False,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
)
for kk, vv in iteritems(v)
)
else:
rv[a.name] = v
else:
rv[a.name] = v
return rv
def _asdict_anything(
val,
is_key,
filter,
dict_factory,
retain_collection_types,
value_serializer,
):
"""
``asdict`` only works on attrs instances, this works on anything.
"""
if getattr(val.__class__, "__attrs_attrs__", None) is not None:
# Attrs class.
rv = asdict(
val,
recurse=True,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
elif isinstance(val, (tuple, list, set, frozenset)):
if retain_collection_types is True:
cf = val.__class__
elif is_key:
cf = tuple
else:
cf = list
rv = cf(
[
_asdict_anything(
i,
is_key=False,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
for i in val
]
)
elif isinstance(val, dict):
df = dict_factory
rv = df(
(
_asdict_anything(
kk,
is_key=True,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
_asdict_anything(
vv,
is_key=False,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
)
for kk, vv in iteritems(val)
)
else:
rv = val
if value_serializer is not None:
rv = value_serializer(None, None, rv)
return rv
def astuple(
inst,
recurse=True,
filter=None,
tuple_factory=tuple,
retain_collection_types=False,
):
"""
Return the ``attrs`` attribute values of *inst* as a tuple.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attrs.Attribute` as the first argument and the
value as the second argument.
:param callable tuple_factory: A callable to produce tuples from. For
example, to produce lists instead of tuples.
:param bool retain_collection_types: Do not convert to ``list``
or ``dict`` when encountering an attribute which type is
``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
``True``.
:rtype: return type of *tuple_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.2.0
"""
attrs = fields(inst.__class__)
rv = []
retain = retain_collection_types # Very long. :/
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv.append(
astuple(
v,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
)
elif isinstance(v, (tuple, list, set, frozenset)):
cf = v.__class__ if retain is True else list
rv.append(
cf(
[
astuple(
j,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(j.__class__)
else j
for j in v
]
)
)
elif isinstance(v, dict):
df = v.__class__ if retain is True else dict
rv.append(
df(
(
astuple(
kk,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(kk.__class__)
else kk,
astuple(
vv,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(vv.__class__)
else vv,
)
for kk, vv in iteritems(v)
)
)
else:
rv.append(v)
else:
rv.append(v)
return rv if tuple_factory is list else tuple_factory(rv)
def has(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: bool
"""
return getattr(cls, "__attrs_attrs__", None) is not None
def assoc(inst, **changes):
"""
Copy *inst* and apply *changes*.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
be found on *cls*.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. deprecated:: 17.1.0
Use `attrs.evolve` instead if you can.
This function will not be removed du to the slightly different approach
compared to `attrs.evolve`.
"""
import warnings
warnings.warn(
"assoc is deprecated and will be removed after 2018/01.",
DeprecationWarning,
stacklevel=2,
)
new = copy.copy(inst)
attrs = fields(inst.__class__)
for k, v in iteritems(changes):
a = getattr(attrs, k, NOTHING)
if a is NOTHING:
raise AttrsAttributeNotFoundError(
"{k} is not an attrs attribute on {cl}.".format(
k=k, cl=new.__class__
)
)
_obj_setattr(new, k, v)
return new
def evolve(inst, **changes):
"""
Create a new instance, based on *inst* with *changes* applied.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise TypeError: If *attr_name* couldn't be found in the class
``__init__``.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 17.1.0
"""
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
if init_name not in changes:
changes[init_name] = getattr(inst, attr_name)
return cls(**changes)
def resolve_types(cls, globalns=None, localns=None, attribs=None):
"""
Resolve any strings and forward annotations in type annotations.
This is only required if you need concrete types in `Attribute`'s *type*
field. In other words, you don't need to resolve your types if you only
use them for static type checking.
With no arguments, names will be looked up in the module in which the class
was created. If this is not what you want, e.g. if the name only exists
inside a method, you may pass *globalns* or *localns* to specify other
dictionaries in which to look up these names. See the docs of
`typing.get_type_hints` for more details.
:param type cls: Class to resolve.
:param Optional[dict] globalns: Dictionary containing global variables.
:param Optional[dict] localns: Dictionary containing local variables.
:param Optional[list] attribs: List of attribs for the given class.
This is necessary when calling from inside a ``field_transformer``
since *cls* is not an ``attrs`` class yet.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class and you didn't pass any attribs.
:raise NameError: If types cannot be resolved because of missing variables.
:returns: *cls* so you can use this function also as a class decorator.
Please note that you have to apply it **after** `attrs.define`. That
means the decorator has to come in the line **before** `attrs.define`.
.. versionadded:: 20.1.0
.. versionadded:: 21.1.0 *attribs*
"""
# Since calling get_type_hints is expensive we cache whether we've
# done it already.
if getattr(cls, "__attrs_types_resolved__", None) != cls:
import typing
hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
for field in fields(cls) if attribs is None else attribs:
if field.name in hints:
# Since fields have been frozen we must work around it.
_obj_setattr(field, "type", hints[field.name])
# We store the class we resolved so that subclasses know they haven't
# been resolved.
cls.__attrs_types_resolved__ = cls
# Return the class so you can use it as a decorator too.
return cls
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/_vendor/attr/_funcs.py
| 0.874158 | 0.177668 |
_funcs.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import functools
from ._compat import new_class
from ._make import _make_ne
_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
def cmp_using(
eq=None,
lt=None,
le=None,
gt=None,
ge=None,
require_same_type=True,
class_name="Comparable",
):
"""
Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
``cmp`` arguments to customize field comparison.
The resulting class will have a full set of ordering methods if
at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
:param Optional[callable] eq: `callable` used to evaluate equality
of two objects.
:param Optional[callable] lt: `callable` used to evaluate whether
one object is less than another object.
:param Optional[callable] le: `callable` used to evaluate whether
one object is less than or equal to another object.
:param Optional[callable] gt: `callable` used to evaluate whether
one object is greater than another object.
:param Optional[callable] ge: `callable` used to evaluate whether
one object is greater than or equal to another object.
:param bool require_same_type: When `True`, equality and ordering methods
will return `NotImplemented` if objects are not of the same type.
:param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
See `comparison` for more details.
.. versionadded:: 21.1.0
"""
body = {
"__slots__": ["value"],
"__init__": _make_init(),
"_requirements": [],
"_is_comparable_to": _is_comparable_to,
}
# Add operations.
num_order_functions = 0
has_eq_function = False
if eq is not None:
has_eq_function = True
body["__eq__"] = _make_operator("eq", eq)
body["__ne__"] = _make_ne()
if lt is not None:
num_order_functions += 1
body["__lt__"] = _make_operator("lt", lt)
if le is not None:
num_order_functions += 1
body["__le__"] = _make_operator("le", le)
if gt is not None:
num_order_functions += 1
body["__gt__"] = _make_operator("gt", gt)
if ge is not None:
num_order_functions += 1
body["__ge__"] = _make_operator("ge", ge)
type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body))
# Add same type requirement.
if require_same_type:
type_._requirements.append(_check_same_type)
# Add total ordering if at least one operation was defined.
if 0 < num_order_functions < 4:
if not has_eq_function:
# functools.total_ordering requires __eq__ to be defined,
# so raise early error here to keep a nice stack.
raise ValueError(
"eq must be define is order to complete ordering from "
"lt, le, gt, ge."
)
type_ = functools.total_ordering(type_)
return type_
def _make_init():
"""
Create __init__ method.
"""
def __init__(self, value):
"""
Initialize object with *value*.
"""
self.value = value
return __init__
def _make_operator(name, func):
"""
Create operator method.
"""
def method(self, other):
if not self._is_comparable_to(other):
return NotImplemented
result = func(self.value, other.value)
if result is NotImplemented:
return NotImplemented
return result
method.__name__ = "__%s__" % (name,)
method.__doc__ = "Return a %s b. Computed by attrs." % (
_operation_names[name],
)
return method
def _is_comparable_to(self, other):
"""
Check whether `other` is comparable to `self`.
"""
for func in self._requirements:
if not func(self, other):
return False
return True
def _check_same_type(self, other):
"""
Return True if *self* and *other* are of the same type, False otherwise.
"""
return other.value.__class__ is self.value.__class__
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/licensedcode/_vendor/attr/_cmp.py
| 0.847021 | 0.336876 |
_cmp.py
|
pypi
|
def get_relative_path(root_path, path):
"""
Return a path relativefrom the posix 'path' relative to a
base path of `len_base_path` length where the base is a directory if
`base_is_dir` True or a file otherwise.
"""
return path[len(root_path):].lstrip('/')
LEGAL_STARTS_ENDS = (
'copying',
'copyright',
'copyrights',
'copyleft',
'notice',
'license',
'licenses',
'licence',
'licences',
'licensing',
'licencing',
'legal',
'eula',
'agreement',
'copyleft',
'patent',
'patents',
)
_MANIFEST_ENDS = {
'.about': 'ABOUT file',
'/bower.json': 'bower',
'/project.clj': 'clojure',
'.podspec': 'cocoapod',
'/composer.json': 'composer',
'/description': 'cran',
'/elm-package.json': 'elm',
'/+compact_manifest': 'freebsd',
'+manifest': 'freebsd',
'.gemspec': 'gem',
'/metadata': 'gem',
# the extracted metadata of a gem archive
'/metadata.gz-extract': 'gem',
'/build.gradle': 'gradle',
'/project.clj': 'clojure',
'.pom': 'maven',
'/pom.xml': 'maven',
'.cabal': 'haskell',
'/haxelib.json': 'haxe',
'/package.json': 'npm',
'.nuspec': 'nuget',
'.pod': 'perl',
'/meta.yml': 'perl',
'/dist.ini': 'perl',
'/pipfile': 'pypi',
'/setup.cfg': 'pypi',
'/setup.py': 'pypi',
'/PKG-INFO': 'pypi',
'/pyproject.toml': 'pypi',
'.spec': 'rpm',
'/cargo.toml': 'rust',
'.spdx': 'spdx',
'/dependencies': 'generic',
# note that these two cannot be top-level for now
'debian/copyright': 'deb',
'meta-inf/manifest.mf': 'maven',
# TODO: Maven also has sometimes a pom under META-INF/
# 'META-INF/manifest.mf': 'JAR and OSGI',
}
MANIFEST_ENDS = tuple(_MANIFEST_ENDS)
README_STARTS_ENDS = (
'readme',
)
def check_resource_name_start_and_end(resource, STARTS_ENDS):
"""
Return True if `resource.name` or `resource.base_name` begins or ends with
an element of `STARTS_ENDS`
"""
name = resource.name.lower()
base_name = resource.base_name.lower()
return (
name.startswith(STARTS_ENDS)
or name.endswith(STARTS_ENDS)
or base_name.startswith(STARTS_ENDS)
or base_name.endswith(STARTS_ENDS)
)
def set_classification_flags(resource,
_LEGAL=LEGAL_STARTS_ENDS,
_MANIF=MANIFEST_ENDS,
_README=README_STARTS_ENDS,
):
"""
Set classification flags on the `resource` Resource
"""
path = resource.path.lower()
resource.is_legal = is_legal = check_resource_name_start_and_end(resource, _LEGAL)
resource.is_readme = is_readme = check_resource_name_start_and_end(resource, _README)
# FIXME: this will never be picked up as this is NOT available in a pre-scan plugin
has_package_data = bool(getattr(resource, 'package_data', False))
resource.is_manifest = is_manifest = path.endswith(_MANIF) or has_package_data
resource.is_key_file = (resource.is_top_level and (is_readme or is_legal or is_manifest))
return resource
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/summarycode/classify.py
| 0.479016 | 0.190611 |
classify.py
|
pypi
|
from collections import defaultdict
import attr
import click
from commoncode.fileset import get_matches as get_fileset_matches
from plugincode.pre_scan import PreScanPlugin
from plugincode.pre_scan import pre_scan_impl
from commoncode.cliutils import PluggableCommandLineOption
from commoncode.cliutils import PRE_SCAN_GROUP
# Tracing flag
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Assign a facet to a file.
A facet is defined by zero or more glob/fnmatch expressions. Multiple facets can
be assigned to a file. The facets definition is a list of (facet, pattern) and a
file is assigned all the facets that have a pattern defintion that match their
path.
Once all files have been assigned a facet, files without a facet are assigned to
the core facet.
The known facets are:
- core - core files of a package. Used as default if no other facet apply.
- data - data files of a package (such as CSV, etc).
- dev - files used at development time (e.g. build scripts, dev tools, etc)
- docs - Documentation files.
- examples - Code example files.
- tests - Test files and tools.
- thirdparty - Embedded code from a third party (aka. vendored or bundled)
See also https://github.com/clearlydefined/clearlydefined/blob/8f58a9a216cf7c129fe2cf6abe1cc6f960535e0b/docs/clearly.md#facets
"""
FACET_CORE = 'core'
FACET_DEV = 'dev'
FACET_TESTS = 'tests'
FACET_DOCS = 'docs'
FACET_DATA = 'data'
FACET_EXAMPLES = 'examples'
FACETS = (
FACET_CORE,
FACET_DEV,
FACET_TESTS,
FACET_DOCS,
FACET_DATA,
FACET_EXAMPLES,
)
def validate_facets(ctx, param, value):
"""
Return the facets if valid or raise a UsageError otherwise.
Validate facets values against the list of known facets.
"""
if not value:
return
_facet_patterns, invalid_facet_definitions = build_facets(value)
if invalid_facet_definitions:
known_msg = ', '.join(FACETS)
uf = '\n'.join(sorted(' ' + x for x in invalid_facet_definitions))
msg = ('Invalid --facet option(s):\n'
'{uf}\n'
'Valid <facet> values are: {known_msg}.\n'.format(**locals()))
raise click.UsageError(msg)
return value
@pre_scan_impl
class AddFacet(PreScanPlugin):
"""
Assign one or more "facet" to each file (and NOT to directories). Facets are
a way to qualify that some part of the scanned code may be core code vs.
test vs. data, etc.
"""
resource_attributes = dict(facets=attr.ib(default=attr.Factory(list), repr=False))
run_order = 20
sort_order = 20
options = [
PluggableCommandLineOption(('--facet',),
multiple=True,
metavar='<facet>=<pattern>',
callback=validate_facets,
help='Add the <facet> to files with a path matching <pattern>.',
help_group=PRE_SCAN_GROUP,
sort_order=80,
)
]
def is_enabled(self, facet, **kwargs):
if TRACE:
logger_debug('is_enabled: facet:', facet)
return bool(facet)
def process_codebase(self, codebase, facet=(), **kwargs):
"""
Add facets to file resources using the `facet` definition of facets.
Each entry in the `facet` sequence is a string as in <facet>:<pattern>
"""
if not facet:
return
facet_definitions, _invalid_facet_definitions = build_facets(facet)
if TRACE:
logger_debug('facet_definitions:', facet_definitions)
# Walk the codebase and set the facets for each file (and only files)
for resource in codebase.walk(topdown=True):
if not resource.is_file:
continue
facets = compute_path_facets(resource.path, facet_definitions)
if facets:
resource.facets = facets
else:
resource.facets = [FACET_CORE]
resource.save(codebase)
def compute_path_facets(path, facet_definitions):
"""
Return a sorted list of unique facet strings for `path` using the
`facet_definitions` mapping of {pattern: [facet, facet]}.
"""
if not path or not path.strip() or not facet_definitions:
return []
facets = set()
for matches in get_fileset_matches(path, facet_definitions, all_matches=True):
facets.update(matches)
return sorted(facets)
def build_facets(facets, known_facet_names=FACETS):
"""
Return:
- a mapping for facet patterns to a list of unique facet names as
{pattern: [facet, facet, ...]}
- a sorted list of error messages for invalid or unknown facet definitions
found in `facets`.
The `known` facets set of known facets is used for validation.
"""
invalid_facet_definitions = set()
facet_patterns = defaultdict(list)
for facet_def in facets:
facet, _, pattern = facet_def.partition('=')
facet = facet.strip().lower()
pattern = pattern.strip()
if not pattern:
invalid_facet_definitions.add(
'missing <pattern> in "{facet_def}".'.format(**locals()))
continue
if not facet:
invalid_facet_definitions.add(
'missing <facet> in "{facet_def}".'.format(**locals()))
continue
if facet not in known_facet_names:
invalid_facet_definitions.add(
'unknown <facet> in "{facet_def}".'.format(**locals()))
continue
facets = facet_patterns[pattern]
if facet not in facets:
facet_patterns[pattern].append(facet)
return facet_patterns, sorted(invalid_facet_definitions)
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/summarycode/facet.py
| 0.681833 | 0.260848 |
facet.py
|
pypi
|
def get_resource_summary(resource, key, as_attribute=False):
"""
Return the "summary" value as mapping for the `key` summary attribute of a
resource.
This is collected either from a direct Resource.summary attribute if
`as_attribute` is True or as a Resource.extra_data summary item otherwise.
"""
if as_attribute:
summary = resource.summary
else:
summary = resource.extra_data.get('summary', {})
summary = summary or {}
return summary.get(key) or None
def set_resource_summary(resource, key, value, as_attribute=False):
"""
Set `value` as the "summary" value for the `key` summary attribute of a
resource
This is set either in a direct Resource.summary attribute if `as_attribute`
is True or as a Resource.extra_data summary item otherwise.
"""
if as_attribute:
resource.summary[key] = value
else:
summary = resource.extra_data.get('summary')
if not summary:
summary = dict([(key, value)])
resource.extra_data['summary'] = summary
summary[key] = value
def sorted_counter(counter):
"""
Return a list of ordered mapping of {value:val, count:cnt} built from a
`counter` mapping of {value: count} and sortedd by decreasing count then by
value.
"""
def by_count_value(value_count):
value, count = value_count
return -count, value or ''
summarized = [
dict([('value', value), ('count', count)])
for value, count in sorted(counter.items(), key=by_count_value)]
return summarized
def get_resource_tallies(resource, key, as_attribute=False):
"""
Return the "tallies" value as mapping for the `key` tallies attribute of a
resource.
This is collected either from a direct Resource.tallies attribute if
`as_attribute` is True or as a Resource.extra_data tallies item otherwise.
"""
if as_attribute:
tallies = resource.tallies
else:
tallies = resource.extra_data.get('tallies', {})
tallies = tallies or {}
return tallies.get(key) or None
def set_resource_tallies(resource, key, value, as_attribute=False):
"""
Set `value` as the "tallies" value for the `key` tallies attribute of a
resource
This is set either in a direct Resource.tallies attribute if `as_attribute`
is True or as a Resource.extra_data tallies item otherwise.
"""
if as_attribute:
resource.tallies[key] = value
else:
tallies = resource.extra_data.get('tallies')
if not tallies:
tallies = dict([(key, value)])
resource.extra_data['tallies'] = tallies
tallies[key] = value
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/summarycode/utils.py
| 0.874319 | 0.429609 |
utils.py
|
pypi
|
from collections import defaultdict
import re
import attr
import fingerprints
from text_unidecode import unidecode
from cluecode.copyrights import CopyrightDetector
from commoncode.text import toascii
from summarycode.utils import sorted_counter
from summarycode.utils import get_resource_tallies
from summarycode.utils import set_resource_tallies
# Tracing flags
TRACE = False
TRACE_FP = False
TRACE_DEEP = False
TRACE_TEXT = False
TRACE_CANO = False
def logger_debug(*args):
pass
if TRACE or TRACE_CANO:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
# TODO: keep the original order of statements as much as possible
def copyright_tallies(resource, children, keep_details=False):
return build_tallies(
resource=resource,
children=children,
attributes_list='copyrights',
attribute_value='copyright',
tallier=tally_copyrights,
keep_details=keep_details
)
def holder_tallies(resource, children, keep_details=False):
return build_tallies(
resource=resource,
children=children,
attributes_list='holders',
attribute_value='holder',
tallier=tally_persons,
keep_details=keep_details
)
def author_tallies(resource, children, keep_details=False):
return build_tallies(
resource=resource,
children=children,
attributes_list='authors',
attribute_value='author',
tallier=tally_persons,
keep_details=keep_details
)
def build_tallies(
resource,
children,
attributes_list,
attribute_value,
tallier,
keep_details=False,
):
"""
Update the ``resource`` Resource with a tally of scan fields from itself and its
``children``.
Resources and this for the `attributes_list` values list key (such as
copyrights, etc) and the ``attribute_value`` details key (such as copyright).
- `attributes_list` is the name of the attribute values list
('copyrights', 'holders' etc.)
- `attribute_value` is the name of the attribute value key in this list
('copyright', 'holder' etc.)
- `tallier` is a function that takes a list of texts and returns
texts with counts
"""
# Collect current data
values = getattr(resource, attributes_list, [])
no_detection_counter = 0
if values:
# keep current data as plain strings
candidate_texts = [entry.get(attribute_value) for entry in values]
else:
candidate_texts = []
if resource.is_file:
no_detection_counter += 1
# Collect direct children existing summaries
for child in children:
child_summaries = get_resource_tallies(
child,
key=attributes_list,
as_attribute=keep_details
) or []
for child_summary in child_summaries:
count = child_summary['count']
value = child_summary['value']
if value:
candidate_texts.append(Text(value, value, count))
else:
no_detection_counter += count
# summarize proper using the provided function
tallied = tallier(candidate_texts)
# add back the counter of things without detection
if no_detection_counter:
tallied.update({None: no_detection_counter})
tallied = sorted_counter(tallied)
if TRACE:
logger_debug('COPYRIGHT tallied:', tallied)
set_resource_tallies(
resource,
key=attributes_list,
value=tallied,
as_attribute=keep_details,
)
return tallied
# keep track of an original text value and the corresponding clustering "key"
@attr.attributes(slots=True)
class Text(object):
# cleaned, normalized, clustering text for a copyright holder
key = attr.attrib()
# original text for a copyright holder
original = attr.attrib()
# count of occurences of a text
count = attr.attrib(default=1)
def normalize(self):
if TRACE_TEXT:
logger_debug('Text.normalize:', self)
key = self.key.lower()
key = ' '.join(key.split())
key = key.strip('.,').strip()
key = clean(key)
self.key = key.strip('.,').strip()
def transliterate(self):
self.key = toascii(self.key, translit=True)
def fingerprint(self):
key = self.key
if not isinstance(key, str):
key = unidecode(key)
fp = fingerprints.generate(key)
if TRACE_TEXT or TRACE_FP:
logger_debug('Text.fingerprint:key: ', repr(self.key))
logger_debug('Text.fingerprint:fp : ', fingerprints.generate(unidecode(self.key)))
self.key = fp
def tally_copyrights(texts, _detector=CopyrightDetector()):
"""
Return a list of mapping of {value:string, count:int} given a
list of copyright strings or Text() objects.
"""
texts_to_tally = []
no_detection_counter = 0
for text in texts:
if not text:
no_detection_counter += 1
continue
# Keep Text objects as-is
if isinstance(text, Text):
texts_to_tally.append(text)
else:
# FIXME: redetect to strip year should not be needed!!
statements_without_years = _detector.detect(
[(1, text)],
include_copyrights=True,
include_holders=False,
include_authors=False,
include_copyright_years=False,
)
for detection in statements_without_years:
copyr = detection.copyright
texts_to_tally.append(Text(copyr, copyr))
counter = tally(texts_to_tally)
if no_detection_counter:
counter[None] = no_detection_counter
return counter
def tally_persons(texts):
"""
Return a list of mapping of {value:string, count:int} given a
list of holders strings or Text() objects.
"""
texts_to_tally = []
no_detection_counter = 0
for text in texts:
if not text:
no_detection_counter += 1
continue
# Keep Text objects as-is
if isinstance(text, Text):
texts_to_tally.append(text)
else:
cano = canonical_holder(text)
texts_to_tally.append(Text(cano, cano))
counter = tally(texts_to_tally)
if no_detection_counter:
counter[None] = no_detection_counter
return counter
def tally(summary_texts):
"""
Return a mapping of {value: count} given a list of Text objects
(representing either copyrights, holders or authors).
"""
if TRACE:
logger_debug('summarize: INITIAL texts:')
for s in summary_texts:
logger_debug(' ', s)
for text in summary_texts:
text.normalize()
if TRACE_DEEP:
logger_debug('summarize: NORMALIZED 1 texts:')
for s in summary_texts:
logger_debug(' ', s)
texts = list(filter_junk(summary_texts))
if TRACE_DEEP:
logger_debug('summarize: DEJUNKED texts:')
for s in summary_texts:
logger_debug(' ', s)
for t in texts:
t.normalize()
if TRACE_DEEP:
logger_debug('summarize: NORMALIZED 2 texts:')
for s in summary_texts:
logger_debug(' ', s)
# keep non-empties
texts = list(t for t in texts if t.key)
if TRACE_DEEP:
logger_debug('summarize: NON-EMPTY 1 texts:')
for s in summary_texts:
logger_debug(' ', s)
# convert to plain ASCII, then fingerprint
for t in texts:
t.transliterate()
if TRACE_DEEP:
logger_debug('summarize: ASCII texts:')
for s in summary_texts:
logger_debug(' ', s)
for t in texts:
t.fingerprint()
if TRACE_DEEP or TRACE_FP:
logger_debug('summarize: FINGERPRINTED texts:')
for s in summary_texts:
logger_debug(' ', s)
# keep non-empties
texts = list(t for t in texts if t.key)
if TRACE_DEEP:
logger_debug('summarize: NON-EMPTY 2 texts:')
for s in summary_texts:
logger_debug(' ', s)
# cluster
clusters = cluster(texts)
if TRACE_DEEP:
clusters = list(clusters)
logger_debug('summarize: CLUSTERS:')
for c in clusters:
logger_debug(' ', c)
counter = {text.original: count for text, count in clusters}
if TRACE:
logger_debug('summarize: FINAL SUMMARIZED:')
for c in counter:
logger_debug(' ', c)
return counter
def cluster(texts):
"""
Given a `texts` iterable of Text objects, group these objects when they have the
same key. Yield a tuple of (Text object, count of its occurences).
"""
clusters = defaultdict(list)
for text in texts:
clusters[text.key].append(text)
for cluster_key, cluster_texts in clusters.items():
try:
# keep the longest as the representative value for a cluster
cluster_texts.sort(key=lambda x:-len(x.key))
representative = cluster_texts[0]
count = sum(t.count for t in cluster_texts)
if TRACE_DEEP:
logger_debug('cluster: representative, count', representative, count)
yield representative, count
except Exception as e:
msg = (
f'Error in cluster(): cluster_key: {cluster_key!r}, '
f'cluster_texts: {cluster_texts!r}\n'
)
import traceback
msg += traceback.format_exc()
raise Exception(msg) from e
def clean(text):
"""
Return an updated and cleaned Text object from a `text` Text object
normalizing some pucntuations around some name and acronyms.
"""
if not text:
return text
text = text.replace('A. M.', 'A.M.')
return text
# set of common prefixes that can be trimmed from a name
prefixes = frozenset([
'his',
'by',
'from',
'and',
'of',
'for',
'<p>',
])
def strip_prefixes(s, prefixes=prefixes):
"""
Return the `s` string with any of the string in the `prefixes` set
striped from the left. Normalize and strip spaces.
For example:
>>> s = 'by AND for the Free Software Foundation'
>>> strip_prefixes(s) == 'the Free Software Foundation'
True
"""
s = s.split()
while s and s[0].lower().strip().strip('.,') in prefixes:
s = s[1:]
return ' '.join(s)
# set of suffixes that can be stripped from a name
suffixes = frozenset([
'(minizip)',
])
def strip_suffixes(s, suffixes=suffixes):
"""
Return the `s` string with any of the string in the `suffixes` set
striped from the right. Normalize and strip spaces.
For example:
>>> s = 'RedHat Inc corp'
>>> strip_suffixes(s, set(['corp'])) == 'RedHat Inc'
True
"""
s = s.split()
while s and s[-1].lower().strip().strip('.,') in suffixes:
s = s[:-1]
return u' '.join(s)
# TODO: we need a gazeteer of places and or use usaddress and probablepeople or
# refine the POS tagging to catch these better
JUNK_HOLDERS = frozenset([
'advanced computing',
'inc',
'llc',
'ltd',
'berlin',
'munich',
'massachusetts',
'maynard',
'cambridge',
'norway',
'and',
'is',
'a',
'cedar rapids',
'iowa',
'u.s.a',
'u.s.a.',
'usa',
'source code',
'mountain view',
'england',
'web applications',
'menlo park',
'california',
'irvine',
'pune',
'india',
'stockholm',
'sweden',
'sweden)',
'software',
'france',
'concord',
'date here',
'software',
'not',
])
def filter_junk(texts):
"""
Filter junk from an iterable of texts objects.
"""
for text in texts:
if not text.key:
continue
if text.key.lower() in JUNK_HOLDERS:
continue
if text.key.isdigit():
continue
if len(text.key) == 1:
continue
yield text
# Mapping of commonly abbreviated names to their expanded, canonical forms.
# This is mostly of use when these common names show as holders without their
# proper company suffix
COMMON_NAMES = {
'3dfxinteractiveinc.': '3dfx Interactive',
'cern': 'CERN - European Organization for Nuclear Research',
'ciscosystemsinc': 'Cisco Systems',
'ciscosystems': 'Cisco Systems',
'cisco': 'Cisco Systems',
'daisy': 'Daisy',
'daisyltd': 'Daisy',
'fsf': 'Free Software Foundation',
'freesoftwarefoundation': 'Free Software Foundation',
'freesoftwarefoundationinc': 'Free Software Foundation',
'thefreesoftwarefoundation': 'Free Software Foundation',
'thefreesoftwarefoundationinc': 'Free Software Foundation',
'hp': 'Hewlett-Packard',
'hewlettpackard': 'Hewlett-Packard',
'hewlettpackardco': 'Hewlett-Packard',
'hpcompany': 'Hewlett-Packard',
'hpdevelopmentcompanylp': 'Hewlett-Packard',
'hpdevelopmentcompany': 'Hewlett-Packard',
'hewlettpackardcompany': 'Hewlett-Packard',
'theandroidopensourceproject': 'Android Open Source Project',
'androidopensourceproject': 'Android Open Source Project',
'ibm': 'IBM',
'redhat': 'Red Hat',
'redhatinc': 'Red Hat',
'softwareinthepublicinterest': 'Software in the Public Interest',
'spiinc': 'Software in the Public Interest',
'suse': 'SuSE',
'suseinc': 'SuSE',
'sunmicrosystems': 'Sun Microsystems',
'sunmicrosystemsinc': 'Sun Microsystems',
'sunmicro': 'Sun Microsystems',
'thaiopensourcesoftwarecenter': 'Thai Open Source Software Center',
'apachefoundation': 'The Apache Software Foundation',
'apachegroup': 'The Apache Software Foundation',
'apache': 'The Apache Software Foundation',
'apachesoftwarefoundation': 'The Apache Software Foundation',
'theapachegroup': 'The Apache Software Foundation',
'eclipse': 'The Eclipse Foundation',
'eclipsefoundation': 'The Eclipse Foundation',
'regentsoftheuniversityofcalifornia': 'The Regents of the University of California',
'borland': 'Borland',
'borlandcorp': 'Borland',
'microsoft': 'Microsoft',
'microsoftcorp': 'Microsoft',
'microsoftinc': 'Microsoft',
'microsoftcorporation': 'Microsoft',
'google': 'Google',
'googlellc': 'Google',
'googleinc': 'Google',
'intel': 'Intel',
}
# Remove everything except letters and numbers
_keep_only_chars = re.compile('[_\\W]+', re.UNICODE).sub # NOQA
def keep_only_chars(s):
return _keep_only_chars('', s)
def canonical_holder(s):
"""
Return a canonical holder for string `s` or s.
"""
key = keep_only_chars(s).lower()
cano = COMMON_NAMES.get(key)
if TRACE_CANO:
logger_debug('cano: for s:', s, 'with key:', key, 'is cano:', cano)
s = cano or s
s = strip_suffixes(s)
return s
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/summarycode/copyright_tallies.py
| 0.444806 | 0.157363 |
copyright_tallies.py
|
pypi
|
from collections import Counter
import attr
from commoncode.cliutils import POST_SCAN_GROUP, PluggableCommandLineOption
from plugincode.post_scan import PostScanPlugin, post_scan_impl
from summarycode.utils import (get_resource_tallies, set_resource_tallies,
sorted_counter)
# Tracing flags
TRACE = False
TRACE_LIGHT = False
def logger_debug(*args):
pass
if TRACE or TRACE_LIGHT:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Create summarized scan data.
"""
@post_scan_impl
class Tallies(PostScanPlugin):
"""
Compute tallies for license, copyright and other scans at the codebase level
"""
run_order = 15
sort_order = 15
codebase_attributes = dict(tallies=attr.ib(default=attr.Factory(dict)))
options = [
PluggableCommandLineOption(('--tallies',),
is_flag=True, default=False,
help='Compute tallies for license, copyright and other scans at the codebase level.',
help_group=POST_SCAN_GROUP)
]
def is_enabled(self, tallies, **kwargs):
return tallies
def process_codebase(self, codebase, tallies, **kwargs):
if TRACE_LIGHT: logger_debug('Tallies:process_codebase')
tallies = compute_codebase_tallies(codebase, keep_details=False, **kwargs)
codebase.attributes.tallies.update(tallies)
@post_scan_impl
class TalliesWithDetails(PostScanPlugin):
"""
Compute tallies of different scan attributes of a scan at the codebase level and
keep file and directory details.
The scan attributes that are tallied are:
- detected_license_expression
- copyrights
- holders
- authors
- programming_language
- packages
"""
# mapping of tally data at the codebase level for the whole codebase
codebase_attributes = dict(tallies=attr.ib(default=attr.Factory(dict)))
# store tallies at the file and directory level in this attribute when
# keep details is True
resource_attributes = dict(tallies=attr.ib(default=attr.Factory(dict)))
run_order = 100
sort_order = 100
options = [
PluggableCommandLineOption(('--tallies-with-details',),
is_flag=True, default=False,
help='Compute tallies of license, copyright and other scans at the codebase level, '
'keeping intermediate details at the file and directory level.',
help_group=POST_SCAN_GROUP)
]
def is_enabled(self, tallies_with_details, **kwargs):
return tallies_with_details
def process_codebase(self, codebase, tallies_with_details, **kwargs):
tallies = compute_codebase_tallies(codebase, keep_details=True, **kwargs)
codebase.attributes.tallies.update(tallies)
def compute_codebase_tallies(codebase, keep_details, **kwargs):
"""
Compute tallies of a scan at the codebase level for available scans.
If `keep_details` is True, also keep file and directory details in the
`tallies` file attribute for every file and directory.
"""
from summarycode.copyright_tallies import (author_tallies,
copyright_tallies,
holder_tallies)
attrib_summarizers = [
('detected_license_expression', license_tallies),
('copyrights', copyright_tallies),
('holders', holder_tallies),
('authors', author_tallies),
('programming_language', language_tallies),
('packages', package_tallies),
]
# find which attributes are available for summarization by checking the root
# resource
root = codebase.root
summarizers = [s for a, s in attrib_summarizers if hasattr(root, a)]
if TRACE: logger_debug('compute_codebase_tallies with:', summarizers)
# collect and set resource-level summaries
for resource in codebase.walk(topdown=False):
children = resource.children(codebase)
for summarizer in summarizers:
_summary_data = summarizer(resource, children, keep_details=keep_details)
if TRACE: logger_debug('tallies for:', resource.path, 'after tallies:', summarizer, 'is:', _summary_data)
codebase.save_resource(resource)
# set the tallies from the root resource at the codebase level
if keep_details:
tallies = root.tallies
else:
tallies = root.extra_data.get('tallies', {})
if TRACE: logger_debug('codebase tallies:', tallies)
return tallies
def license_tallies(resource, children, keep_details=False):
"""
Populate a license_expressions list of mappings such as
{value: "expression", count: "count of occurences"}
sorted by decreasing count.
"""
LIC_EXP = 'detected_license_expression'
LIC_DET = 'license_detections'
LIC_CLUE = 'license_clues'
license_expressions = []
# Collect current data
detected_expressions = []
for detection in getattr(resource, LIC_DET, []):
detected_expressions.append(detection["license_expression"])
for match in getattr(resource, LIC_CLUE, []):
detected_expressions.append(match["license_expression"])
package_license_detections = []
PACKAGE_DATA = 'package_data'
package_data = getattr(resource, PACKAGE_DATA, [])
if package_data:
package_license_detections.extend(
[
detection
for detection in getattr(package_data, LIC_DET, [])
if detection
]
)
for detection in package_license_detections:
detected_expressions.append(detection["license_expression"])
if not detected_expressions and resource.is_file:
# also count files with no detection
license_expressions.append(None)
else:
license_expressions.extend(detected_expressions)
# Collect direct children expression tallies
for child in children:
child_tallies = get_resource_tallies(child, key=LIC_EXP, as_attribute=keep_details) or []
for child_tally in child_tallies:
# TODO: review this: this feels rather weird
child_sum_val = child_tally.get('value')
values = [child_sum_val] * child_tally['count']
license_expressions.extend(values)
# summarize proper
licenses_counter = tally_licenses(license_expressions)
tallied = sorted_counter(licenses_counter)
set_resource_tallies(resource, key=LIC_EXP, value=tallied, as_attribute=keep_details)
return tallied
def tally_licenses(license_expressions):
"""
Given a list of license expressions, return a mapping of {expression: count
of occurences}
"""
# TODO: we could normalize and/or sort each license_expression before
# summarization and consider other equivalence or containment checks
return Counter(license_expressions)
def language_tallies(resource, children, keep_details=False):
"""
Populate a programming_language tallies list of mappings such as
{value: "programming_language", count: "count of occurences"}
sorted by decreasing count.
"""
PROG_LANG = 'programming_language'
languages = []
prog_lang = getattr(resource, PROG_LANG , [])
if not prog_lang:
if resource.is_file:
# also count files with no detection
languages.append(None)
else:
languages.append(prog_lang)
# Collect direct children expression summaries
for child in children:
child_tallies = get_resource_tallies(child, key=PROG_LANG, as_attribute=keep_details) or []
for child_tally in child_tallies:
child_sum_val = child_tally.get('value')
if child_sum_val:
values = [child_sum_val] * child_tally['count']
languages.extend(values)
# summarize proper
languages_counter = tally_languages(languages)
tallied = sorted_counter(languages_counter)
set_resource_tallies(resource, key=PROG_LANG, value=tallied, as_attribute=keep_details)
return tallied
def tally_languages(languages):
"""
Given a list of languages, return a mapping of {language: count
of occurences}
"""
# TODO: consider aggregating related langauges (C/C++, etc)
return Counter(languages)
TALLYABLE_ATTRS = set([
'detected_license_expression',
'copyrights',
'holders',
'authors',
'programming_language',
# 'packages',
])
def tally_values(values, attribute):
"""
Given a list of `values` for a given `attribute`, return a mapping of
{value: count of occurences} using a tallier specific to the attribute.
"""
if attribute not in TALLYABLE_ATTRS:
return {}
from summarycode.copyright_tallies import tally_copyrights, tally_persons
value_talliers_by_attr = dict(
detected_license_expression=tally_licenses,
copyrights=tally_copyrights,
holders=tally_persons,
authors=tally_persons,
programming_language=tally_languages,
)
return value_talliers_by_attr[attribute](values)
@post_scan_impl
class KeyFilesTallies(PostScanPlugin):
"""
Compute tallies of a scan at the codebase level for only key files.
"""
run_order = 150
sort_order = 150
# mapping of tally data at the codebase level for key files
codebase_attributes = dict(tallies_of_key_files=attr.ib(default=attr.Factory(dict)))
options = [
PluggableCommandLineOption(('--tallies-key-files',),
is_flag=True, default=False,
help='Compute tallies for license, copyright and other scans for key, '
'top-level files. Key files are top-level codebase files such '
'as COPYING, README and package manifests as reported by the '
'--classify option "is_legal", "is_readme", "is_manifest" '
'and "is_top_level" flags.',
help_group=POST_SCAN_GROUP,
required_options=['classify', 'tallies']
)
]
def is_enabled(self, tallies_key_files, **kwargs):
return tallies_key_files
def process_codebase(self, codebase, tallies_key_files, **kwargs):
tally_codebase_key_files(codebase, **kwargs)
def tally_codebase_key_files(codebase, field='tallies', **kwargs):
"""
Summarize codebase key files.
"""
talliables = codebase.attributes.tallies.keys()
if TRACE: logger_debug('tallieables:', talliables)
# TODO: we cannot summarize packages with "key files" for now
talliables = [k for k in talliables if k in TALLYABLE_ATTRS]
# create one counter for each summarized attribute
talliable_values_by_key = dict([(key, []) for key in talliables])
# filter to get only key files
key_files = (res for res in codebase.walk(topdown=True)
if (res.is_file and res.is_top_level
and (res.is_readme or res.is_legal or res.is_manifest)))
for resource in key_files:
for key, values in talliable_values_by_key.items():
# note we assume things are stored as extra-data, not as direct
# Resource attributes
res_tallies = get_resource_tallies(resource, key=key, as_attribute=False) or []
for tally in res_tallies:
# each tally is a mapping with value/count: we transform back to values
tally_value = tally.get('value')
if tally_value:
values.extend([tally_value] * tally['count'])
tally_counters = []
for key, values in talliable_values_by_key.items():
if key not in TALLYABLE_ATTRS:
continue
tallied = tally_values(values, key)
tally_counters.append((key, tallied))
sorted_tallies = dict(
[(key, sorted_counter(counter)) for key, counter in tally_counters])
codebase.attributes.tallies_of_key_files = sorted_tallies
if TRACE: logger_debug('codebase tallies_of_key_files:', sorted_tallies)
@post_scan_impl
class FacetTallies(PostScanPlugin):
"""
Compute tallies for a scan at the codebase level, grouping by facets.
"""
run_order = 200
sort_order = 200
codebase_attributes = dict(tallies_by_facet=attr.ib(default=attr.Factory(list)))
options = [
PluggableCommandLineOption(('--tallies-by-facet',),
is_flag=True, default=False,
help='Compute tallies for license, copyright and other scans and group the '
'results by facet.',
help_group=POST_SCAN_GROUP,
required_options=['facet', 'tallies']
)
]
def is_enabled(self, tallies_by_facet, **kwargs):
return tallies_by_facet
def process_codebase(self, codebase, tallies_by_facet, **kwargs):
if TRACE_LIGHT: logger_debug('FacetTallies:process_codebase')
tally_codebase_by_facet(codebase, **kwargs)
def tally_codebase_by_facet(codebase, **kwargs):
"""
Summarize codebase by facte.
"""
from summarycode import facet as facet_module
talliable = codebase.attributes.tallies.keys()
if TRACE:
logger_debug('tally_codebase_by_facet for attributes:', talliable)
# create one group of by-facet values lists for each summarized attribute
talliable_values_by_key_by_facet = dict([
(facet, dict([(key, []) for key in talliable]))
for facet in facet_module.FACETS
])
for resource in codebase.walk(topdown=True):
if not resource.is_file:
continue
for facet in resource.facets:
# note: this will fail loudly if the facet is not a known one
values_by_attribute = talliable_values_by_key_by_facet[facet]
for key, values in values_by_attribute.items():
# note we assume things are stored as extra-data, not as direct
# Resource attributes
res_tallies = get_resource_tallies(resource, key=key, as_attribute=False) or []
for tally in res_tallies:
# each tally is a mapping with value/count: we transform back to discrete values
tally_value = tally.get('value')
if tally_value:
values.extend([tally_value] * tally['count'])
final_tallies = []
for facet, talliable_values_by_key in talliable_values_by_key_by_facet.items():
tally_counters = (
(key, tally_values(values, key))
for key, values in talliable_values_by_key.items()
)
sorted_tallies = dict(
[(key, sorted_counter(counter)) for key, counter in tally_counters])
facet_tally = dict(facet=facet)
facet_tally['tallies'] = sorted_tallies
final_tallies.append(facet_tally)
codebase.attributes.tallies_by_facet.extend(final_tallies)
if TRACE: logger_debug('codebase tallies_by_facet:', final_tallies)
def add_files(packages, resource):
"""
Update in-place every package mapping in the `packages` list by updating or
creating the the "files" attribute from the `resource`. Yield back the
packages.
"""
for package in packages:
files = package['files'] = package.get('files') or []
fil = resource.to_dict(skinny=True)
if fil not in files:
files.append(fil)
yield package
def package_tallies(resource, children, keep_details=False):
"""
Populate a packages tally list of packages mappings.
Note: `keep_details` is never used, as we are not keeping details of
packages as this has no value.
"""
packages = []
# Collect current data
current_packages = getattr(resource, 'packages') or []
if TRACE_LIGHT and current_packages:
from packagedcode.models import Package
packs = [Package(**p) for p in current_packages]
logger_debug('package_tallier: for:', resource,
'current_packages are:', packs)
current_packages = add_files(current_packages, resource)
packages.extend(current_packages)
if TRACE_LIGHT and packages:
logger_debug()
from packagedcode.models import Package # NOQA
packs = [Package(**p) for p in packages]
logger_debug('package_tallier: for:', resource,
'packages are:', packs)
# Collect direct children packages tallies
for child in children:
child_tallies = get_resource_tallies(child, key='packages', as_attribute=False) or []
packages.extend(child_tallies)
# summarize proper
set_resource_tallies(resource, key='packages', value=packages, as_attribute=False)
return packages
|
/scancode_toolkit_mini-32.0.6-cp311-none-any.whl/summarycode/tallies.py
| 0.550366 | 0.161982 |
tallies.py
|
pypi
|
"""
Monkeypatch Pool iterators so that Ctrl-C interrupts everything properly
derived from https://gist.github.com/aljungberg/626518
Copyright (c) Alexander Ljungberg. All rights reserved.
Modifications Copyright (c) nexB Inc. and others. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from multiprocessing import pool
from multiprocessing import TimeoutError
class ScanCodeTimeoutError(Exception):
pass
def wrapped(func):
"""
Ensure that we have a default timeout in all cases.
This is to work around some subtle Python bugs in multiprocessing
- https://bugs.python.org/issue8296
- https://bugs.python.org/issue9205
- https://bugs.python.org/issue22393
- https://bugs.python.org/issue38084
- """
# ensure that we do not double wrap
if func.__name__ != 'wrap':
def wrap(self, timeout=None):
try:
result = func(self, timeout=timeout or 3600)
except TimeoutError as te:
raise ScanCodeTimeoutError() from te
return result
return wrap
else:
return func
pool.IMapIterator.next = wrapped(pool.IMapIterator.next)
pool.IMapIterator.__next__ = pool.IMapIterator.next
pool.IMapUnorderedIterator.next = wrapped(pool.IMapUnorderedIterator.next)
pool.IMapUnorderedIterator.__next__ = pool.IMapUnorderedIterator.next
def get_pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
return pool.Pool(processes, initializer, initargs, maxtasksperchild)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/scancode/pool.py
| 0.710829 | 0.206854 |
pool.py
|
pypi
|
from itertools import islice
from os.path import getsize
import logging
import os
import sys
from commoncode.filetype import get_last_modified_date
from commoncode.hash import multi_checksums
from scancode import ScancodeError
from typecode.contenttype import get_type
TRACE = os.environ.get('SCANCODE_DEBUG_API', False)
def logger_debug(*args):
pass
logger = logging.getLogger(__name__)
if TRACE:
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(
' '.join(isinstance(a, str) and a or repr(a) for a in args)
)
"""
Main scanning functions.
Each scanner is a function that accepts a location and returns a sequence of
mappings as results.
Note: this API is unstable and still evolving.
"""
def get_copyrights(
location,
deadline=sys.maxsize,
**kwargs,
):
"""
Return a mapping with a single 'copyrights' key with a value that is a list
of mappings for copyright detected in the file at `location`.
"""
from cluecode.copyrights import detect_copyrights
from cluecode.copyrights import Detection
detections = detect_copyrights(
location,
include_copyrights=True,
include_holders=True,
include_authors=True,
include_copyright_years=True,
include_copyright_allrights=False,
deadline=deadline,
)
copyrights, holders, authors = Detection.split(detections, to_dict=True)
results = dict([
('copyrights', copyrights),
('holders', holders),
('authors', authors),
])
# TODO: do something if we missed the deadline
return results
def get_emails(
location,
threshold=50,
test_slow_mode=False,
test_error_mode=False,
**kwargs,
):
"""
Return a mapping with a single 'emails' key with a value that is a list of
mappings for emails detected in the file at `location`.
Return only up to `threshold` values. Return all values if `threshold` is 0.
If test_mode is True, the scan will be slow for testing purpose and pause
for one second.
"""
if test_error_mode:
raise ScancodeError('Triggered email failure')
if test_slow_mode:
import time
time.sleep(1)
from cluecode.finder import find_emails
results = []
found_emails = ((em, ln) for (em, ln) in find_emails(location) if em)
if threshold:
found_emails = islice(found_emails, threshold)
for email, line_num in found_emails:
result = {}
results.append(result)
result['email'] = email
result['start_line'] = line_num
result['end_line'] = line_num
return dict(emails=results)
def get_urls(location, threshold=50, **kwargs):
"""
Return a mapping with a single 'urls' key with a value that is a list of
mappings for urls detected in the file at `location`.
Return only up to `threshold` values. Return all values if `threshold` is 0.
"""
from cluecode.finder import find_urls
results = []
found_urls = ((u, ln) for (u, ln) in find_urls(location) if u)
if threshold:
found_urls = islice(found_urls, threshold)
for urls, line_num in found_urls:
result = {}
results.append(result)
result['url'] = urls
result['start_line'] = line_num
result['end_line'] = line_num
return dict(urls=results)
SPDX_LICENSE_URL = 'https://spdx.org/licenses/{}'
DEJACODE_LICENSE_URL = 'https://enterprise.dejacode.com/urn/urn:dje:license:{}'
SCANCODE_LICENSEDB_URL = 'https://scancode-licensedb.aboutcode.org/{}'
SCANCODE_DATA_BASE_URL = 'https://github.com/nexB/scancode-toolkit/tree/develop/src/licensedcode/data'
SCANCODE_LICENSE_URL = f'{SCANCODE_DATA_BASE_URL}/licenses/{{}}.LICENSE'
SCANCODE_RULE_URL = f'{SCANCODE_DATA_BASE_URL}/rules/{{}}'
def get_licenses(
location,
min_score=0,
include_text=False,
license_text_diagnostics=False,
license_diagnostics=False,
deadline=sys.maxsize,
unknown_licenses=False,
**kwargs,
):
"""
Return a mapping or license_detections for licenses detected in the file at
`location`
This mapping contains two keys:
- 'license_detections' with a value that is list of mappings of license information.
- 'detected_license_expression' with a value that is a license expression string.
`min_score` is a minimum score threshold from 0 to 100. The default is 0,
meaning that all license matches are returned. If specified, matches with a
score lower than `minimum_score` are not returned.
If `include_text` is True, matched text is included in the returned
`licenses` data as well as a file-level `percentage_of_license_text`
as the percentage of file words detected as license text or notice.
This is used to determine if a file contains mostly licensing.
If ``unknown_licenses`` is True, also detect unknown licenses.
"""
from licensedcode.cache import build_spdx_license_expression
from licensedcode.cache import get_cache
from licensedcode.detection import detect_licenses
from packagedcode.utils import combine_expressions
license_clues = []
license_detections = []
detected_expressions = []
detected_license_expression = None
detected_license_expression_spdx = None
detections = detect_licenses(
location=location,
min_score=min_score,
deadline=deadline,
unknown_licenses=unknown_licenses,
**kwargs,
)
all_qspans = []
detection = None
for detection in detections:
all_qspans.extend(detection.qspans)
if detection.license_expression is None:
detection_mapping = detection.to_dict(
include_text=include_text,
license_text_diagnostics=license_text_diagnostics,
license_diagnostics=license_diagnostics,
)
license_clues.extend(detection_mapping["matches"])
else:
detected_expressions.append(detection.license_expression)
license_detections.append(
detection.to_dict(
include_text=include_text,
license_text_diagnostics=license_text_diagnostics,
license_diagnostics=license_diagnostics,
)
)
if TRACE:
logger_debug(f"api: get_licenses: license_detections: {license_detections}")
logger_debug(f"api: get_licenses: license_clues: {license_clues}")
if detected_expressions:
detected_license_expression = combine_expressions(
expressions=detected_expressions,
relation='AND',
unique=True,
)
detected_license_expression_spdx = str(build_spdx_license_expression(
detected_license_expression,
licensing=get_cache().licensing
))
percentage_of_license_text = 0
if detection:
percentage_of_license_text = detection.percentage_license_text_of_file(all_qspans)
return dict([
('detected_license_expression', detected_license_expression),
('detected_license_expression_spdx', detected_license_expression_spdx),
('license_detections', license_detections),
('license_clues', license_clues),
('percentage_of_license_text', percentage_of_license_text),
])
SCANCODE_DEBUG_PACKAGE_API = os.environ.get('SCANCODE_DEBUG_PACKAGE_API', False)
def _get_package_data(location, application=True, system=False, **kwargs):
"""
Return a mapping of package manifest information detected in the file at ``location``.
Include ``application`` packages (such as pypi) and/or ``system`` packages.
Note that all exceptions are caught if there are any errors while parsing a
package manifest.
"""
assert application or system
from packagedcode.recognize import recognize_package_data
try:
return recognize_package_data(
location=location,
application=application,
system=system
) or []
except Exception as e:
if TRACE:
logger.error(f'_get_package_data: location: {location!r}: Exception: {e}')
if SCANCODE_DEBUG_PACKAGE_API:
raise
else:
# attention: we are swallowing ALL exceptions here!
pass
def get_package_info(location, **kwargs):
"""
Return a mapping of package information detected in the file at `location`.
This API function is DEPRECATED, use `get_package_data` instead.
"""
import warnings
warnings.warn(
"`get_package_info` is deprecated. Use `get_package_data` instead.",
DeprecationWarning,
stacklevel=1
)
packages = _get_package_data(location, **kwargs) or []
return dict(packages=[p.to_dict() for p in packages])
def get_package_data(location, application=True, system=False, **kwargs):
"""
Return a mapping of package manifest information detected in the file at
`location`.
Include ``application`` packages (such as pypi) and/or ``system`` packages.
"""
if TRACE:
print(' scancode.api.get_package_data: kwargs', kwargs)
package_datas = _get_package_data(
location=location,
application=application,
system=system,
**kwargs,
) or []
return dict(package_data=[pd.to_dict() for pd in package_datas])
def get_file_info(location, **kwargs):
"""
Return a mapping of file information collected for the file at `location`.
"""
result = {}
# TODO: move date and size these to the inventory collection step???
result['date'] = get_last_modified_date(location) or None
result['size'] = getsize(location) or 0
sha1, md5, sha256 = multi_checksums(location, ('sha1', 'md5', 'sha256')).values()
result['sha1'] = sha1
result['md5'] = md5
result['sha256'] = sha256
collector = get_type(location)
result['mime_type'] = collector.mimetype_file or None
result['file_type'] = collector.filetype_file or None
result['programming_language'] = collector.programming_language or None
result['is_binary'] = bool(collector.is_binary)
result['is_text'] = bool(collector.is_text)
result['is_archive'] = bool(collector.is_archive)
result['is_media'] = bool(collector.is_media)
result['is_source'] = bool(collector.is_source)
result['is_script'] = bool(collector.is_script)
return result
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/scancode/api.py
| 0.54698 | 0.198258 |
api.py
|
pypi
|
# FIXME: the glob story is very weird!!!
examples_text = '''
Scancode command lines examples:
(Note for Windows: use '\\' back slash instead of '/' forward slash for paths.)
Scan a single file for copyrights. Print scan results to stdout as pretty JSON:
scancode --copyright samples/zlib/zlib.h --json-pp -
Scan a single file for licenses, print verbose progress to stderr as each
file is scanned. Save scan to a JSON file:
scancode --license --verbose samples/zlib/zlib.h --json licenses.json
Scan a directory explicitly for licenses and copyrights. Redirect JSON scan
results to a file:
scancode --license --copyright samples/zlib/ --json - > scan.json
Scan a directory while ignoring a single file. Scan for license, copyright and
package manifests. Use four parallel processes.
Print scan results to stdout as pretty formatted JSON.
scancode -lc --package --ignore README --processes 4 --json-pp - samples/
Scan a directory while ignoring all files with .txt extension.
Print scan results to stdout as pretty formatted JSON.
It is recommended to use quotes around glob patterns to prevent pattern
expansion by the shell:
scancode --json-pp - --ignore "*.txt" samples/
Special characters supported in GLOB pattern:
- * matches everything
- ? matches any single character
- [seq] matches any character in seq
- [!seq] matches any character not in seq
For a literal match, wrap the meta-characters in brackets.
For example, '[?]' matches the character '?'.
For details on GLOB patterns see https://en.wikipedia.org/wiki/Glob_(programming).
Note: Glob patterns cannot be applied to path as strings.
For example, this will not ignore "samples/JGroups/licenses".
scancode --json - --ignore "samples*licenses" samples/
Scan a directory while ignoring multiple files (or glob patterns).
Print the scan results to stdout as JSON:
scancode --json - --ignore README --ignore "*.txt" samples/
Scan a directory for licenses and copyrights. Save scan results to an
HTML file:
scancode --license --copyright --html scancode_result.html samples/zlib
'''
epilog_text = '''Examples (use --examples for more):
\b
Scan the 'samples' directory for licenses and copyrights.
Save scan results to the 'scancode_result.json' JSON file:
scancode --license --copyright --json-pp scancode_result.json samples
\b
Scan the 'samples' directory for licenses and package manifests. Print scan
results on screen as pretty-formatted JSON (using the special '-' FILE to print
to on screen/to stdout):
scancode --json-pp - --license --package samples
Note: when you run scancode, a progress bar is displayed with a counter of the
number of files processed. Use --verbose to display file-by-file progress.
'''
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/scancode/help.py
| 0.430866 | 0.350866 |
help.py
|
pypi
|
import string
import re
import ipaddress
import urlpy
from commoncode.text import toascii
from cluecode import finder_data
from textcode import analysis
# Tracing flags
TRACE = False
TRACE_URL = False
TRACE_EMAIL = False
def logger_debug(*args):
pass
if TRACE or TRACE_URL or TRACE_EMAIL:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Find patterns in text lines such as a emails and URLs.
Optionally apply filters to pattern matches.
"""
def find(location, patterns):
"""
Yield match and matched lines for patterns found in file at location as a
tuple of (key, found text, text line). `patterns` is a list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
if TRACE:
from pprint import pformat
loc = pformat(location)
logger_debug('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals())
for line_number, line in analysis.numbered_text_lines(location, demarkup=False):
for key, pattern in patterns:
for match in pattern.findall(line):
if TRACE:
logger_debug('find: yielding match: key=%(key)r, '
'match=%(match)r,\n line=%(line)r' % locals())
yield key, toascii(match), line, line_number
def unique_filter(matches):
"""
Iterate over matches and yield unique matches.
"""
uniques = set()
for key, match, line, line_number in matches:
if (key, match,) in uniques:
continue
uniques.add((key, match,))
yield key, match, line, line_number
def apply_filters(matches, *filters):
"""
Apply a sequence of `filters` to a `matches` iterable. Return a new filtered
matches iterable.
A filter must accept a single arg: an iterable of tuples of (key, match,
line, line_number) and must return an iterable of tuples of (key, match, line,
line_number).
"""
for filt in filters:
matches = filt(matches)
return matches
def build_regex_filter(pattern):
"""
Return a filter function using regex pattern, filtering out matches
matching this regex. The pattern should be text, not a compiled re.
"""
def re_filt(matches):
if TRACE:
logger_debug('re_filt: pattern="{}"'.format(pattern))
for key, match, line, line_number in matches:
if matcher(match):
if TRACE:
logger_debug('re_filt: filtering match: "{}"'.format(match))
continue
yield key, match, line, line_number
matcher = re.compile(pattern, re.UNICODE | re.IGNORECASE).match
return re_filt
# A good reference page of email address regex is:
# http://fightingforalostcause.net/misc/2006/compare-email-regex.php email
# regex from http://www.regular-expressions.info/regexbuddy/email.html
def emails_regex():
return re.compile('\\b[A-Z0-9._%-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}\\b', re.IGNORECASE)
def find_emails(location, unique=True):
"""
Yield an iterable of (email, line_number) found in file at ``location``.
Only return unique items if ``unique`` is True.
"""
patterns = [('emails', emails_regex(),)]
matches = find(location, patterns)
if TRACE_EMAIL:
matches = list(matches)
for r in matches:
logger_debug('find_emails: match:', r)
filters = (junk_email_domains_filter, uninteresting_emails_filter)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, email, _line, line_number in matches:
yield email, line_number
def junk_email_domains_filter(matches):
"""
Given an iterable of email matches, return an iterable where email with
common uninteresting domains have been removed, such as local, non public
or example.com emails.
"""
for key, email, line, line_number in matches:
if is_good_email_domain(email):
yield key, email, line, line_number
else:
if TRACE:
logger_debug(f'junk_email_domains_filter: !is_good_host: {email!r}')
def is_good_email_domain(email):
"""
Return True if the domain of the ``email`` string is valid, False otherwise
such as for local, non public domains.
For example::
>>> is_good_email_domain("[email protected]")
True
>>> is_good_email_domain("[email protected]")
False
>>> is_good_email_domain("[email protected]")
False
"""
if not email:
return False
_dest, _, server = email.partition('@')
if not is_good_host(server):
return False
fake_url = f'http://{server}'
_host, domain = url_host_domain(fake_url)
if not is_good_host(domain):
return False
return True
def uninteresting_emails_filter(matches):
"""
Given an iterable of emails matches, return an iterable where common
uninteresting emails have been removed.
"""
for key, email, line, line_number in matches:
good_email = finder_data.classify_email(email)
if not good_email:
continue
yield key, email, line, line_number
# TODO: consider: http://www.regexguru.com/2008/11/detecting-urls-in-a-block-of-text/
# TODO: consider: http://blog.codinghorror.com/the-problem-with-urls/
schemes = 'https?|ftps?|sftp|rsync|ssh|svn|git|hg|https?\\+git|https?\\+svn|https?\\+hg'
url_body = '[^\\s<>\\[\\]"]'
def urls_regex():
# no space, no < >, no [ ] and no double quote
return re.compile('''
(
# URLs with schemes
(?:%(schemes)s)://%(url_body)s+
|
# common URLs prefix without schemes
(?:www|ftp)\\.%(url_body)s+
|
# git style [email protected]:christophercantu/pipeline.git
git\\@%(url_body)s+:%(url_body)s+\\.git
)''' % globals()
, re.UNICODE | re.VERBOSE | re.IGNORECASE)
INVALID_URLS_PATTERN = '((?:' + schemes + ')://([$%*/_])+)'
def find_urls(location, unique=True):
"""
Yield an iterable of (url, line_number) found in file at ``location``.
Only return unique items if ``unique`` is True.
`location` can be a list of strings for testing.
"""
patterns = [('urls', urls_regex(),)]
matches = find(location, patterns)
if TRACE:
matches = list(matches)
for m in matches:
logger_debug('url match:', m)
# the order of filters IS important
filters = (
verbatim_crlf_url_cleaner,
end_of_url_cleaner,
empty_urls_filter,
scheme_adder,
user_pass_cleaning_filter,
build_regex_filter(INVALID_URLS_PATTERN),
canonical_url_cleaner,
junk_url_hosts_filter,
junk_urls_filter,
)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, url, _line, line_number in matches:
if TRACE_URL:
logger_debug('find_urls: line_number:', line_number, '_line:', repr(_line),
'type(url):', type(url), 'url:', repr(url))
yield str(url), line_number
EMPTY_URLS = set(['https', 'http', 'ftp', 'www', ])
def empty_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable without empty URLs.
"""
for key, match, line, line_number in matches:
junk = match.lower().strip(string.punctuation).strip()
if not junk or junk in EMPTY_URLS:
if TRACE:
logger_debug('empty_urls_filter: filtering match: %(match)r' % locals())
continue
yield key, match, line, line_number
def verbatim_crlf_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where literal end of
lines and carriage return characters that may show up as-is, un-encoded in
a URL have been removed.
"""
# FIXME: when is this possible and could happen?
for key, url, line, line_number in matches:
if not url.endswith('/'):
url = url.replace('\n', '')
url = url.replace('\r', '')
yield key, url, line, line_number
def end_of_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where junk characters
commonly found at the end of a URL are removed.
This is not entirely correct, but works practically.
"""
for key, url, line, line_number in matches:
if not url.endswith('/'):
url = url.replace(u'<', u'<')
url = url.replace(u'>', u'>')
url = url.replace(u'&', u'&')
url = url.rstrip(string.punctuation)
url = url.split(u'\\')[0]
url = url.split(u'<')[0]
url = url.split(u'>')[0]
url = url.split(u'(')[0]
url = url.split(u')')[0]
url = url.split(u'[')[0]
url = url.split(u']')[0]
url = url.split(u'"')[0]
url = url.split(u"'")[0]
yield key, url, line, line_number
non_standard_urls_prefix = ('git@',)
def is_filterable(url):
"""
Return True if a url is eligible for filtering. Certain URLs should not pass
through certain filters (such as a [email protected] style urls)
"""
return not url.startswith(non_standard_urls_prefix)
def scheme_adder(matches):
"""
Add a fake http:// scheme if there was none.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
match = add_fake_scheme(match)
yield key, match, line, line_number
def add_fake_scheme(url):
"""
Add a fake http:// scheme to URL if has none.
"""
if not has_scheme(url):
url = 'http://' + url.lstrip(':/').strip()
return url
def has_scheme(url):
"""
Return True if url has a scheme.
"""
return re.match('^(?:%(schemes)s)://.*' % globals(), url, re.UNICODE)
def user_pass_cleaning_filter(matches):
"""
Given an iterable of URL matches, return an iterable where user and
password are removed from the URLs host.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
host, _domain = url_host_domain(match)
if not host:
if TRACE:
logger_debug('user_pass_cleaning_filter: '
'filtering match(no host): %(match)r' % locals())
continue
if '@' in host:
# strips any user/pass
host = host.split(u'@')[-1]
yield key, match, line, line_number
DEFAULT_PORTS = {
'http': 80,
'https': 443
}
def canonical_url(uri):
"""
Return the canonical representation of a given URI.
This assumes the `uri` has a scheme.
* When a default port corresponding for the scheme is explicitly declared
(such as port 80 for http), the port will be removed from the output.
* Fragments '#' are not removed.
* Params and query string arguments are not reordered.
"""
try:
parsed = urlpy.parse(uri)
if not parsed:
return
if TRACE:
logger_debug('canonical_url: parsed:', parsed)
sanitized = parsed.sanitize()
if TRACE:
logger_debug('canonical_url: sanitized:', sanitized)
punycoded = sanitized.punycode()
if TRACE:
logger_debug('canonical_url: punycoded:', punycoded)
deport = punycoded.remove_default_port()
if TRACE:
logger_debug('canonical_url: deport:', deport)
return str(sanitized)
except Exception as e:
if TRACE:
logger_debug('canonical_url: failed for:', uri, 'with:', repr(e))
# ignore it
pass
def canonical_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where URLs have been
canonicalized.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
canonical = canonical_url(match)
if TRACE:
logger_debug('canonical_url_cleaner: '
'match=%(match)r, canonical=%(canonical)r' % locals())
match = canonical
if match:
yield key, match , line, line_number
IP_V4_RE = '^(\\d{1,3}\\.){0,3}\\d{1,3}$'
def is_ip_v4(s):
return re.compile(IP_V4_RE, re.UNICODE).match(s)
IP_V6_RE = (
'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$'
'|'
'^([0-9a-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}\\d{1,3}$'
)
def is_ip_v6(s):
"""
Return True is string s is an IP V6 address
"""
return re.compile(IP_V6_RE, re.UNICODE).match(s)
def is_ip(s):
"""
Return True is string s is an IP address
"""
return is_ip_v4(s) or is_ip_v6(s)
def get_ip(s):
"""
Return True is string s is an IP address
"""
if not is_ip(s):
return False
try:
ip = ipaddress.ip_address(str(s))
return ip
except ValueError:
return False
def is_private_ip(ip):
"""
Return true if ip object is a private or local IP.
"""
if ip:
if isinstance(ip, ipaddress.IPv4Address):
private = (
ip.is_reserved
or ip.is_private
or ip.is_multicast
or ip.is_unspecified
or ip.is_loopback
or ip.is_link_local
)
else:
private(
ip.is_multicast
or ip.is_reserved
or ip.is_link_local
or ip.is_site_local
or ip.is_private
or ip.is_unspecified
or ip.is_loopback
)
return private
def is_good_host(host):
"""
Return True if the host is not some local or uninteresting host.
"""
if not host:
return False
ip = get_ip(host)
if ip:
if is_private_ip(ip):
return False
return finder_data.classify_ip(host)
# at this stage we have a host name, not an IP
if '.' not in host:
# private hostnames not in a domain, including localhost
return False
good_host = finder_data.classify_host(host)
return good_host
def url_host_domain(url):
"""
Return a tuple of the (host, domain) of a URL or None. Assumes that the
URL has a scheme.
"""
try:
parsed = urlpy.parse(url)
host = parsed.host
if not host:
return None, None
domain = parsed.pld
return host.lower(), domain.lower()
except Exception as e:
if TRACE:
logger_debug('url_host_domain: failed for:', url, 'with:', repr(e))
# ignore it
return None, None
def junk_url_hosts_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting hosts or domains have been removed, such as local,
non public or example.com URLs.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
host, domain = url_host_domain(match)
if not is_good_host(host):
if TRACE:
logger_debug('junk_url_hosts_filter: '
'!is_good_host:%(host)r): %(match)r' % locals())
continue
if not is_good_host(domain) and not is_ip(host):
if TRACE:
logger_debug('junk_url_hosts_filter: ''!is_good_host:%(domain)r '
'and !is_ip:%(host)r: %(match)r' % locals())
continue
yield key, match, line, line_number
def junk_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting URLs, or uninteresting URL hosts or domains have been
removed, such as local, non public or example.com URLs.
"""
for key, match, line, line_number in matches:
good_url = finder_data.classify_url(match)
if not good_url:
if TRACE:
logger_debug('junk_url_filter: %(match)r' % locals())
continue
yield key, match, line, line_number
def find_pattern(location, pattern, unique=False):
"""
Find regex pattern in the text lines of file at location.
Return all match groups joined as one unicode string.
Only return unique items if unique is True.
"""
pattern = re.compile(pattern, re.UNICODE | re.IGNORECASE)
matches = find(location, [(None, pattern,)])
if unique:
matches = unique_filter(matches)
for _key, match , _line, line_number in matches:
yield match, line_number
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/cluecode/finder.py
| 0.543106 | 0.197735 |
finder.py
|
pypi
|
The purpose of `packagedcode` is to:
- detect a package,
- determine its dependencies,
- collect its declared licensing (at the metadata/manifest level)
vs. its actual license (as scanned and normalized).
1. **detect the presence of a package** in a codebase based on its manifest, its file
or archive type. Typically it is a third party package but it may be your own.
Taking Python as a main example a package can exist in multiple forms:
1.1. as a **source checkout** (or some source archive such as a source
distribution or an `sdist`) where the presence of a `setup.py` or some
`requirements.txt` file is the key marker for Python. For Maven it would be a
`pom.xml` or a `build.gradle` file, for Ruby a `Gemfile` or `Gemfile.lock`, the
presence of autotools files, and so on, with the goal to eventually covering all
the packages formats/types that are out there and commonly used.
1.2. as an **installable archive or binary** such as a Pypi wheel `.whl` or
`.egg`, a Maven `.jar`, a Ruby `.gem`, a `.nupkg` for a Nuget, a `.rpm` or `.deb`
Linux package, etc... Here the type, shape and name structure of an archive as
well as some its files content are the key markers for detection. The metadata
may also be included in that archive as a file or as some headers (e.g. RPMs)
1.3. as an **installed packaged** such as when you `pip install` a Python package
or `bundle install` Ruby gems or `npm install` node modules. Here the key markers
may be some combo of a typical or conventional directory layout and presence of
specific files such as the metadata installed with a Python `wheel`, a `vendor`
directory for Ruby, some `node_modules` directory tree for npms, or a certain
file type with metadata such as Windows DLLs. Additional markers may also include
"namespaces" such as Java or Python imports, C/C++ namespace declarations.
2. **parse and collect the package datafile or manifest(s)** metadata. For Python, this means
extracting name, version, authorship, declared licensing and declared dependencies as
found in the any of the package descriptor files (e.g. a `setup.py` file,
`requirements` file(s) or any of the `*-dist-info` or `*-egg-info` dir files such as
a `metadata.json`). Other package datafile formats have their own metatada that may be more or
less comprehensive in the breadth and depth of information they offer (e.g.
`.nuspec`, `package.json`, `bower.json`, Godeps, etc...). These metadata include the
declared dependencies (and in some cases the fully resolved dependencies too such as
with Gemfile.lock). Finally, all the different packages formats and data are
normalized and stored in a common data structure abstracting the small differences of
naming and semantics that may exists between all the different package formats.
Once collected, these data are then injected in the `package_data` section of a file scan
for each recognized package datafile.
3. **assemble multiple package datafile** as top level packages.
What code in `packagedcode` is not meant to do:
A. **download packages** from a thirdparty repository: there is code planned for
another tool that will specifically deal with this and also handle collecting
the metadata as served by a package repository (which are in most cases --but not
always-- the same as what is declared in the manifests).
B. **resolve dependencies**: the focus here is on a purely static analysis that by
design does not rely on any network access at runtime. To scan for actually used
dependencies the process is to scan an as-built or as-installed or as-deployed
odebase where the dependencies have already been provisioned and installed
ScanCode will also detect these.
There is also a planned prototype for a dynamic multi-package dependencies
resolver that actually runs live the proper tool to resolve and collect dependencies
(e.g. effectively running Maven, bundler, pip, npm, gradle, bower, go get/dep, etc).
This will be a tool separate from ScanCode as this requires having several/all
package managers installed (and possibly multiple versions of each) and may run code
from the codebase (e.g. a setup.py) and access the network for fetching or resolving
dependencies. It could be also exposed as a web service that can take in a manifest
and package and run the dep resolution safely in an isolated environment (e.g. a
chroot jail or docker container) and return the collected deps.
C. **match packages** (and files) to actual repositories or registries, e.g. given a
scan detecting packages, matching will look them up in a remote package
repository or a local index and possibly using A. and/or B. additionally if needed.
Here again there is a planned code and a tool that will deal specifically with
this aspect and will handle also building an index of actual registries/repositories
and matching using hashes and fingerprints.
And now some answers to questions originally reported by @sschuberth:
> This does not download the source code of a Python package to run ScanCode over it.
Correct. The assumption with ScanCode proper is that the deps have been fetched in the
code you scan if you want to scan for deps. Packages will be detected with their declared
deps, but the deps will neither be resolved nor fetched. As a second step we could also
verify that all the declared deps are present in the scanned code as detected packages.
> This means that cases where the license from the metadata is wrong compared to the LICENSE
file in the source code will not be detected.
Both the metadata and the file level licenses (such as a header comment or a
`LICENSE` file of sorts) are detected by ScanCode: the license scan detect the
licenses while the package scan collects the declared licensing in the metadata. The
interesting thing thanks to this combo is that conflicts (or incomplete
data) can be analyzed and an automated deduction process is feasible: given a
scan for packages and licenses and copyrights, do the package metadata
asserted/declared licenses match the actual detected licenses? If not this could be
reported as an "error" condition... Furthermore, this could be refined based on
classification of the files: a package may assert a top level `MIT` license and use a
GPL-licensed build script. By knowing that the build script is indeed a build script,
we could report that the GPL detected in such script does not conflict with the
overall declared MIT license of the package. The same could be done with test
scripts/code, or documentation code (such as doxygen-generated docs)
> Licenses from transitive dependencies are not taken into account.
If the transitive dependencies have been resolved and their code is present in the
codebase, then they would be caught by a static ScanCode scan and eventually scanned
both for package metadata and/or license detection. There are some caveats to deal
with because some tools (e.g. Maven) may not store the corresponding artifacts/Jars
locally (e.g. side-by-side with a given checkout) and use a `~/user` "global" dot
directory to store a cache instead.
Beyond this, actual dependency resolution of a single package or a complete manifest
will be the topic of another tool as mentioned above.
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/README.rst
| 0.895731 | 0.754418 |
README.rst
|
pypi
|
import ast
from pathlib import Path
"""
Parse setup.py files.
"""
# https://setuptools.readthedocs.io/en/latest/setuptools.html#metadata
FIELDS = {
'author_email',
'author',
'classifiers',
'dependency_links',
'description',
'download_url',
'extras_require',
'install_requires',
'keywords',
'license_file',
'license',
'long_description_content_type',
'long_description',
'maintainer_email',
'maintainer',
'metadata_version',
'name',
'obsoletes',
'package_dir',
'platforms',
'project_urls',
'provides',
'python_requires',
'requires',
'setup_requires',
'tests_require',
'url',
'version',
}
def is_setup_call(element):
"""
Return if the AST ``element`` is a call to the setup() function.
Note: this is derived from the code in packagedcode.pypi.py
"""
if (
isinstance(element, ast.Call)
and (
hasattr(element, 'func')
and isinstance(element.func, ast.Name)
and getattr(element.func, 'id', None) == 'setup'
) or (
hasattr(element, 'func')
and isinstance(element.func, ast.Attribute)
and getattr(element.func, 'attr', None) == 'setup'
and isinstance(element.func.value, ast.Name)
and getattr(element.func.value, 'id', None) == 'setuptools'
)
):
return True
def parse_setup_py(location):
"""
Return a mapping of setuptools.setup() call argument found in a setup.py
file at ``location`` or an empty mapping.
"""
path = Path(location)
tree = tuple(ast.parse(path.read_text(encoding='utf8')).body)
body = tuple(get_body(tree))
call = get_setup_call(tree)
result = get_call_kwargs(call, body)
return clean_setup(result)
def get_body(elements):
"""
Yield the body from ``elements`` as a single iterable.
"""
for element in elements:
if isinstance(element, ast.FunctionDef):
yield from get_body(element.body)
continue
if isinstance(element, ast.If):
yield from get_body(element.body)
if isinstance(element, ast.Expr):
yield element.value
continue
yield element
def get_setup_call(elements):
"""
Return a setup() method call found in the ``elements`` or None.
"""
for element in get_body(elements):
if is_setup_call(element):
return element
elif isinstance(element, (ast.Assign,)):
if isinstance(element.value, ast.Call):
if is_setup_call(element.value):
return element.value
def node_to_value(node, body):
"""
Return the extracted and converted value of a node or None
"""
if node is None:
return
if hasattr(ast, 'Constant'):
if isinstance(node, ast.Constant):
return node.value
if isinstance(node, ast.Str):
return node.s
if isinstance(node, ast.Num):
return node.n
if isinstance(node, (ast.List, ast.Tuple, ast.Set,)):
return [node_to_value(subnode, body) for subnode in node.elts]
if isinstance(node, ast.Dict):
result = {}
for key, value in zip(node.keys, node.values):
result[node_to_value(key, body)] = node_to_value(value, body)
return result
if isinstance(node, ast.Name):
variable = find_variable_in_body(body, node.id)
if variable is not None:
return node_to_value(variable, body)
if isinstance(node, ast.Call):
if not isinstance(node.func, ast.Name):
return
if node.func.id != 'dict':
return
return get_call_kwargs(node, body)
return
def find_variable_in_body(body, name):
"""
Return the value of the variable ``name`` found in the ``body`` ast tree or None.
"""
for elem in body:
if not isinstance(elem, ast.Assign):
continue
for target in elem.targets:
if not isinstance(target, ast.Name):
continue
if target.id == name:
return elem.value
def get_call_kwargs(node: ast.Call, body):
"""
Return a mapping of setup() method call keyword arguments.
"""
result = {}
keywords = getattr(node, 'keywords', []) or []
for keyword in keywords:
# dict unpacking
if keyword.arg is None:
value = node_to_value(keyword.value, body)
if isinstance(value, dict):
result.update(value)
continue
# keyword argument
value = node_to_value(keyword.value, body)
if value is None:
continue
result[keyword.arg] = value
return result
def clean_setup(data):
"""
Return a cleaned mapping from a setup ``data`` mapping.
"""
result = {k: v
for k, v in data.items()
if k in FIELDS
and (v and v is not False)
and str(v) != 'UNKNOWN'
}
# split keywords in words
keywords = result.get('keywords')
if keywords and isinstance(keywords, str):
# some keywords are separated by coma, some by space or lines
if ',' in keywords:
keywords = [k.strip() for k in keywords.split(',')]
else:
keywords = keywords.split()
result['keywords'] = keywords
return result
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/pypi_setup_py.py
| 0.793826 | 0.294653 |
pypi_setup_py.py
|
pypi
|
from packagedcode import models
"""
Various package data file formats to implment.
"""
# Package types
# NOTE: this is somewhat redundant with extractcode archive handlers
# yet the purpose and semantics are rather different here
# TODO: parse me!!!
# TODO: add missing URLs and descriptions
class JavaJarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_jar'
# NOTE: there are a few rare cases where a .zip can be a JAR.
path_patterns = ('*.jar',)
filetypes = ('zip archive', 'java archive',)
description = 'JAR Java Archive'
documentation_url = 'https://en.wikipedia.org/wiki/JAR_(file_format)'
class IvyXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'ant_ivy_xml'
path_patterns = ('*/ivy.xml',)
default_package_type = 'ivy'
default_primary_language = 'Java'
description = 'Ant IVY dependency file'
documentation_url = 'https://ant.apache.org/ivy/history/latest-milestone/ivyfile.html'
class JavaWarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_war_archive'
path_patterns = ('*.war',)
filetypes = ('zip archive',)
default_package_type = 'war'
default_primary_language = 'Java'
description = 'Java Web Application Archive'
documentation_url = 'https://en.wikipedia.org/wiki/WAR_(file_format)'
class JavaWarWebXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_war_web_xml'
path_patterns = ('*/WEB-INF/web.xml',)
filetypes = ('zip archive',)
default_package_type = 'war'
default_primary_language = 'Java'
description = 'Java WAR web/xml'
documentation_url = 'https://en.wikipedia.org/wiki/WAR_(file_format)'
class JavaEarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_ear_archive'
default_package_type = 'ear'
default_primary_language = 'Java'
path_patterns = ('*.ear',)
filetypes = ('zip archive',)
description = 'Java EAR Enterprise application archive'
documentation_url = 'https://en.wikipedia.org/wiki/EAR_(file_format)'
class JavaEarAppXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'java_ear_application_xml'
default_package_type = 'ear'
default_primary_language = 'Java'
path_patterns = ('*/META-INF/application.xml',)
description = 'Java EAR application.xml'
documentation_url = 'https://en.wikipedia.org/wiki/EAR_(file_format)'
class Axis2MarModuleXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'axis2_module_xml'
path_patterns = ('*/meta-inf/module.xml',)
default_package_type = 'axis2'
default_primary_language = 'Java'
description = 'Apache Axis2 module.xml'
documentation_url = 'https://axis.apache.org/axis2/java/core/docs/modules.html'
class Axis2MarArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'axis2_mar'
path_patterns = ('*.mar',)
filetypes = ('zip archive',)
default_package_type = 'axis2'
default_primary_language = 'Java'
description = 'Apache Axis2 module archive'
documentation_url = 'https://axis.apache.org/axis2/java/core/docs/modules.html'
class JBossSarHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'jboss_sar'
path_patterns = ('*.sar',)
filetypes = ('zip archive',)
default_package_type = 'jboss-service'
default_primary_language = 'Java'
description = 'JBOSS service archive'
documentation_url = 'https://docs.jboss.org/jbossas/docs/Server_Configuration_Guide/4/html/ch02s01.html'
class JBossServiceXmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'jboss_service_xml'
path_patterns = ('*/meta-inf/jboss-service.xml',)
default_package_type = 'jboss-service'
default_primary_language = 'Java'
description = 'JBOSS service.xml'
documentation_url = 'https://docs.jboss.org/jbossas/docs/Server_Configuration_Guide/4/html/ch02s01.html'
class MeteorPackageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'meteor_package'
path_patterns = ('*/package.js',)
default_package_type = 'meteor'
default_primary_language = 'JavaScript'
description = 'Meteor package.js'
documentation_url = 'https://docs.meteor.com/api/packagejs.html'
class CpanManifestHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_manifest'
path_patterns = ('*/MANIFEST',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl module MANIFEST'
documentation_url = 'https://metacpan.org/pod/Module::Manifest'
class CpanMakefilePlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_makefile'
path_patterns = ('*/Makefile.PL',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl Makefile.PL'
documentation_url = 'https://www.perlmonks.org/?node_id=128077'
# http://blogs.perl.org/users/neilb/2017/04/an-introduction-to-distribution-metadata.html
# Version 2+ data is what you’ll find in META.json
# Version 1.4 data is what you’ll find in META.yml
class CpanMetaYmlHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_meta_yml'
path_patterns = ('*/META.yml',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl META.yml'
documentation_url = 'https://metacpan.org/pod/CPAN::Meta::YAML'
class CpanMetaJsonHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_meta_json'
path_patterns = ('*/META.json',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl META.json'
documentation_url = 'https://metacpan.org/pod/Parse::CPAN::Meta'
class CpanDistIniHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'cpan_dist_ini'
path_patterns = ('*/dist.ini',)
default_package_type = 'cpan'
default_primary_language = 'Perl'
description = 'CPAN Perl dist.ini'
documentation_url = 'https://metacpan.org/pod/Dist::Zilla::Tutorial'
class AndroidAppArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'android_apk'
default_package_type = 'android'
default_primary_language = 'Java'
path_patterns = ('*.apk',)
filetypes = ('zip archive',)
description = 'Android application package'
documentation_url = 'https://en.wikipedia.org/wiki/Apk_(file_format)'
# see http://tools.android.com/tech-docs/new-build-system/aar-formats
class AndroidLibraryHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'android_aar_library'
default_package_type = 'android_lib'
default_primary_language = 'Java'
# note: Apache Axis also uses AAR path_patterns for plain Jars.
# this could be decided based on internal structure
path_patterns = ('*.aar',)
filetypes = ('zip archive',)
description = 'Android library archive'
documentation_url = 'https://developer.android.com/studio/projects/android-library'
class MozillaExtensionHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'mozilla_xpi'
path_patterns = ('*.xpi',)
filetypes = ('zip archive',)
default_package_type = 'mozilla'
default_primary_language = 'JavaScript'
description = 'Mozilla XPI extension'
documentation_url = 'https://en.wikipedia.org/wiki/XPInstall'
class ChromeExtensionHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'chrome_crx'
path_patterns = ('*.crx',)
filetypes = ('zip archive',)
default_package_type = 'chrome'
default_primary_language = 'JavaScript'
description = 'Chrome extension'
documentation_url = 'https://chrome.google.com/extensions'
class IosAppIpaHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'ios_ipa'
default_package_type = 'ios'
default_primary_language = 'Objective-C'
path_patterns = ('*.ipa',)
filetypes = ('microsoft cabinet',)
description = 'iOS package archive'
documentation_url = 'https://en.wikipedia.org/wiki/.ipa'
class CabArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'microsoft_cabinet'
default_package_type = 'cab'
default_primary_language = 'C'
path_patterns = ('*.cab',)
filetypes = ('microsoft cabinet',)
description = 'Microsoft cabinet archive'
documentation_url = 'https://docs.microsoft.com/en-us/windows/win32/msi/cabinet-files'
class InstallShieldPackageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'installshield_installer'
default_package_type = 'installshield'
path_patterns = ('*.exe',)
filetypes = ('zip installshield',)
description = 'InstallShield installer'
documentation_url = 'https://www.revenera.com/install/products/installshield'
class NsisInstallerHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'nsis_installer'
default_package_type = 'nsis'
path_patterns = ('*.exe',)
filetypes = ('nullsoft installer',)
description = 'NSIS installer'
documentation_url = 'https://nsis.sourceforge.io/Main_Page'
class SharArchiveHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'shar_shell_archive'
default_package_type = 'shar'
path_patterns = ('*.shar',)
filetypes = ('posix shell script',)
description = 'shell archive'
documentation_url = 'https://en.wikipedia.org/wiki/Shar'
class AppleDmgHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'apple_dmg'
default_package_type = 'dmg'
path_patterns = ('*.dmg', '*.sparseimage',)
description = ''
documentation_url = ''
class IsoImageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'iso_disk_image'
default_package_type = 'iso'
path_patterns = ('*.iso', '*.udf', '*.img',)
filetypes = ('iso 9660 cd-rom', 'high sierra cd-rom',)
description = 'ISO disk image'
documentation_url = 'https://en.wikipedia.org/wiki/ISO_9660'
class SquashfsImageHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'squashfs_disk_image'
default_package_type = 'squashfs'
filetypes = ('squashfs',)
description = 'Squashfs disk image'
documentation_url = 'https://en.wikipedia.org/wiki/SquashFS'
# TODO: Add VM images formats(VMDK, OVA, OVF, VDI, etc) and Docker/other containers
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/misc.py
| 0.428951 | 0.199191 |
misc.py
|
pypi
|
import io
import re
import attr
from packageurl import PackageURL
@attr.s()
class GoModule(object):
namespace = attr.ib(default=None)
name = attr.ib(default=None)
version = attr.ib(default=None)
module = attr.ib(default=None)
require = attr.ib(default=None)
exclude = attr.ib(default=None)
def purl(self, include_version=True):
version = None
if include_version:
version = self.version
return PackageURL(
type='golang',
namespace=self.namespace,
name=self.name,
version=version
).to_string()
# Regex expressions to parse different types of go.mod file dependency
parse_module = re.compile(
r'(?P<type>[^\s]+)'
r'(\s)+'
r'(?P<ns_name>[^\s]+)'
r'\s?'
r'(?P<version>(.*))'
).match
parse_dep_link = re.compile(
r'.*?'
r'(?P<ns_name>[^\s]+)'
r'\s+'
r'(?P<version>(.*))'
).match
def preprocess(line):
"""
Return line string after removing commented portion and excess spaces.
"""
if "//" in line:
line = line[:line.index('//')]
line = line.strip()
return line
def parse_gomod(location):
"""
Return a dictionary containing all the important go.mod file data.
Handle go.mod files from Go.
See https://golang.org/ref/mod#go.mod-files for details
For example:
module example.com/my/thing
go 1.12
require example.com/other/thing v1.0.2
require example.com/new/thing v2.3.4
exclude example.com/old/thing v1.2.3
require (
example.com/new/thing v2.3.4
example.com/old/thing v1.2.3
)
require (
example.com/new/thing v2.3.4
example.com/old/thing v1.2.3
)
Each module line is in the form
require github.com/davecgh/go-spew v1.1.1
or
exclude github.com/davecgh/go-spew v1.1.1
or
module github.com/alecthomas/participle
For example::
>>> p = parse_module('module github.com/alecthomas/participle')
>>> assert p.group('type') == ('module')
>>> assert p.group('ns_name') == ('github.com/alecthomas/participle')
>>> p = parse_module('require github.com/davecgh/go-spew v1.1.1')
>>> assert p.group('type') == ('require')
>>> assert p.group('ns_name') == ('github.com/davecgh/go-spew')
>>> assert p.group('version') == ('v1.1.1')
A line for require or exclude can be in the form:
github.com/davecgh/go-spew v1.1.1
For example::
>>> p = parse_dep_link('github.com/davecgh/go-spew v1.1.1')
>>> assert p.group('ns_name') == ('github.com/davecgh/go-spew')
>>> assert p.group('version') == ('v1.1.1')
"""
with io.open(location, encoding='utf-8', closefd=True) as data:
lines = data.readlines()
gomods = GoModule()
require = []
exclude = []
for i, line in enumerate(lines):
line = preprocess(line)
if 'require' in line and '(' in line:
for req in lines[i + 1:]:
req = preprocess(req)
if ')' in req:
break
parsed_dep_link = parse_dep_link(req)
ns_name = parsed_dep_link.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
if parsed_dep_link:
require.append(GoModule(
namespace=namespace,
name=name,
version=parsed_dep_link.group('version')
)
)
continue
if 'exclude' in line and '(' in line:
for exc in lines[i + 1:]:
exc = preprocess(exc)
if ')' in exc:
break
parsed_dep_link = parse_dep_link(exc)
ns_name = parsed_dep_link.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
if parsed_dep_link:
exclude.append(GoModule(
namespace=namespace,
name=name,
version=parsed_dep_link.group('version')
)
)
continue
parsed_module_name = parse_module(line)
if parsed_module_name:
ns_name = parsed_module_name.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
if 'module' in line:
gomods.namespace = namespace
gomods.name = name
continue
if 'require' in line:
require.append(GoModule(
namespace=namespace,
name=name,
version=parsed_module_name.group('version')
)
)
continue
if 'exclude' in line:
exclude.append(GoModule(
namespace=namespace,
name=name,
version=parsed_module_name.group('version')
)
)
continue
gomods.require = require
gomods.exclude = exclude
return gomods
# Regex expressions to parse go.sum file dependency
# dep example: github.com/BurntSushi/toml v0.3.1 h1:WXkYY....
get_dependency = re.compile(
r'(?P<ns_name>[^\s]+)'
r'\s+'
r'(?P<version>[^\s]+)'
r'\s+'
r'h1:(?P<checksum>[^\s]*)'
).match
def parse_gosum(location):
"""
Return a list of GoSum from parsing the go.sum file at `location`.
Handles go.sum file from Go.
See https://blog.golang.org/using-go-modules for details
A go.sum file contains pinned Go modules checksums of two styles:
For example::
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
... where the line with /go.mod is for a check of that go.mod file
and the other line contains a dirhash for that path as documented as
https://pkg.go.dev/golang.org/x/mod/sumdb/dirhash
For example::
>>> p = get_dependency('github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=')
>>> assert p.group('ns_name') == ('github.com/BurntSushi/toml')
>>> assert p.group('version') == ('v0.3.1')
>>> assert p.group('checksum') == ('WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=')
"""
with io.open(location, encoding='utf-8', closefd=True) as data:
lines = data.readlines()
gosums = []
for line in lines:
line = line.replace('/go.mod', '')
parsed_dep = get_dependency(line)
ns_name = parsed_dep.group('ns_name')
namespace, _, name = ns_name.rpartition('/')
dep = GoModule(
namespace=namespace,
name=name,
version=parsed_dep.group('version')
)
if dep in gosums:
continue
gosums.append(dep)
return gosums
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/go_mod.py
| 0.532911 | 0.210259 |
go_mod.py
|
pypi
|
from commoncode import fileutils
from packagedcode import models
"""
Handle README.*-style semi-structured package metadata.
These are seen in Android, Chromium and a few more places.
"""
# Common README field name mapped to known PackageData field name
PACKAGE_FIELD_BY_README_FIELD = {
'name': 'name',
'project': 'name',
'version': 'version',
'copyright': 'copyright',
'download link': 'download_url',
'downloaded from': 'download_url',
'homepage': 'homepage_url',
'website': 'homepage_url',
'repo': 'homepage_url',
'source': 'homepage_url',
'upstream': 'homepage_url',
'url': 'homepage_url',
'project url': 'homepage_url',
'licence': 'extracted_license_statement',
'license': 'extracted_license_statement',
}
class ReadmeHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'readme'
default_package_type = 'readme'
path_patterns = (
'*/README.android',
'*/README.chromium',
'*/README.facebook',
'*/README.google',
'*/README.thirdparty',
)
@classmethod
def parse(cls, location):
with open(location, encoding='utf-8') as loc:
readme_manifest = loc.read()
package_data = build_package(readme_manifest)
if not package_data.name:
# If no name was detected for the Package, then we use the basename
# of the parent directory as the Package name
parent_dir = fileutils.parent_directory(location)
parent_dir_basename = fileutils.file_base_name(parent_dir)
package_data.name = parent_dir_basename
yield package_data
def build_package(readme_manifest):
"""
Return a Package object from a readme_manifest mapping (from a
README.chromium file or similar) or None.
"""
package = models.PackageData(
datasource_id=ReadmeHandler.datasource_id,
type=ReadmeHandler.default_package_type,
)
for line in readme_manifest.splitlines():
line = line.strip()
if ':' in line:
key, _sep, value = line.partition(':')
elif '=' in line:
key, _sep, value = line.partition('=')
else:
key = None
value = None
if key:
key = key.lower().strip()
if value:
value = value.strip()
if not key or not value:
continue
package_key = PACKAGE_FIELD_BY_README_FIELD.get(key)
if not package_key:
continue
setattr(package, package_key, value)
package.populate_license_fields()
return package
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/readme.py
| 0.638835 | 0.247692 |
readme.py
|
pypi
|
import io
import json
from functools import partial
from packagedcode import models
"""
Parse PHP composer package manifests, see https://getcomposer.org/ and
https://packagist.org/
TODO: add support for composer.lock and packagist formats: both are fairly
similar.
"""
class BasePhpComposerHandler(models.DatafileHandler):
@classmethod
def assemble(cls, package_data, resource, codebase, package_adder):
datafile_name_patterns = (
'composer.json',
'composer.lock',
)
if resource.has_parent():
dir_resource = resource.parent(codebase)
else:
dir_resource = resource
yield from cls.assemble_from_many_datafiles(
datafile_name_patterns=datafile_name_patterns,
directory=dir_resource,
codebase=codebase,
package_adder=package_adder,
)
@classmethod
def assign_package_to_resources(cls, package, resource, codebase, package_adder):
return models.DatafileHandler.assign_package_to_parent_tree(package, resource, codebase, package_adder)
class PhpComposerJsonHandler(BasePhpComposerHandler):
datasource_id = 'php_composer_json'
path_patterns = ('*composer.json',)
default_package_type = 'composer'
default_primary_language = 'PHP'
default_relation_license = 'OR'
description = 'PHP composer manifest'
documentation_url = 'https://getcomposer.org/doc/04-schema.md'
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location``
pointing to a package archive, manifest or similar.
Note that this is NOT exactly the packagist.json format (all are closely
related of course but have important (even if minor) differences.
"""
with io.open(location, encoding='utf-8') as loc:
package_json = json.load(loc)
yield build_package_data(package_json)
def get_repository_homepage_url(namespace, name):
if namespace and name:
return f'https://packagist.org/packages/{namespace}/{name}'
elif name:
return f'https://packagist.org/packages/{name}'
def get_api_data_url(namespace, name):
if namespace and name:
return f'https://packagist.org/p/packages/{namespace}/{name}.json'
elif name:
return f'https://packagist.org/p/packages/{name}.json'
def build_package_data(package_data):
# Note: A composer.json without name and description is not a usable PHP
# composer package. Name and description fields are required but only for
# published packages: https://getcomposer.org/doc/04-schema.md#name We want
# to catch both published and non-published packages here. Therefore, we use
# None as a package name if there is no name.
ns_name = package_data.get('name')
is_private = False
if not ns_name:
ns = None
name = None
is_private = True
else:
ns, _, name = ns_name.rpartition('/')
package = models.PackageData(
datasource_id=PhpComposerJsonHandler.datasource_id,
type=PhpComposerJsonHandler.default_package_type,
namespace=ns,
name=name,
repository_homepage_url=get_repository_homepage_url(ns, name),
api_data_url=get_api_data_url(ns, name),
primary_language=PhpComposerJsonHandler.default_primary_language,
)
# mapping of top level composer.json items to the Package object field name
plain_fields = [
('version', 'version'),
('description', 'summary'),
('keywords', 'keywords'),
('homepage', 'homepage_url'),
]
for source, target in plain_fields:
value = package_data.get(source)
if isinstance(value, str):
value = value.strip()
if value:
setattr(package, target, value)
# mapping of top level composer.json items to a function accepting as
# arguments the composer.json element value and returning an iterable of
# key, values Package Object to update
field_mappers = [
('authors', author_mapper),
('license', partial(licensing_mapper, is_private=is_private)),
('support', support_mapper),
('require', partial(_deps_mapper, scope='require', is_runtime=True)),
('require-dev', partial(_deps_mapper, scope='require-dev', is_optional=True)),
('provide', partial(_deps_mapper, scope='provide', is_runtime=True)),
('conflict', partial(_deps_mapper, scope='conflict', is_runtime=True, is_optional=True)),
('replace', partial(_deps_mapper, scope='replace', is_runtime=True, is_optional=True)),
('suggest', partial(_deps_mapper, scope='suggest', is_runtime=True, is_optional=True)),
('source', source_mapper),
('dist', dist_mapper)
]
for source, func in field_mappers:
value = package_data.get(source)
if value:
if isinstance(value, str):
value = value.strip()
if value:
func(value, package)
# Parse vendor from name value
vendor_mapper(package)
# Per https://getcomposer.org/doc/04-schema.md#license this is an expression
package.populate_license_fields()
return package
class PhpComposerLockHandler(BasePhpComposerHandler):
datasource_id = 'php_composer_lock'
path_patterns = ('*composer.lock',)
default_package_type = 'composer'
default_primary_language = 'PHP'
description = 'PHP composer lockfile'
documentation_url = 'https://getcomposer.org/doc/01-basic-usage.md#commit-your-composer-lock-file-to-version-control'
@classmethod
def parse(cls, location):
with io.open(location, encoding='utf-8') as loc:
package_data = json.load(loc)
packages = [
build_package_data(p)
for p in package_data.get('packages', [])
]
packages_dev = [
build_package_data(p)
for p in package_data.get('packages-dev', [])
]
required_deps = [
build_dep_package(p, scope='require', is_runtime=True, is_optional=False)
for p in packages
]
required_dev_deps = [
build_dep_package(p, scope='require-dev', is_runtime=False, is_optional=True)
for p in packages_dev
]
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
primary_language=cls.default_primary_language,
dependencies=required_deps + required_dev_deps
)
for package in packages + packages_dev:
yield package
def licensing_mapper(licenses, package, is_private=False):
"""
Update package licensing and return package.
Licensing data structure has evolved over time and is a tad messy.
https://getcomposer.org/doc/04-schema.md#license
The value of license is either:
- an SPDX expression string: { "license": "(LGPL-2.1 or GPL-3.0+)" }
- a list of SPDX license ids choices: "license": ["LGPL-2.1","GPL-3.0+"]
Some older licenses are plain strings and not SPDX ids. Also if there is no
license and the `is_private` Fkag is True, we return a "proprietary-license"
license.
"""
if not licenses and is_private:
package.extracted_license_statement = 'proprietary-license'
return package
package.extracted_license_statement = licenses
return package
def author_mapper(authors_content, package):
"""
Update package parties with authors and return package.
https://getcomposer.org/doc/04-schema.md#authors
"""
for name, role, email, url in parse_person(authors_content):
role = role or 'author'
package.parties.append(
models.Party(type=models.party_person, name=name,
role=role, email=email, url=url))
return package
def support_mapper(support, package):
"""
Update support and bug tracking url.
https://getcomposer.org/doc/04-schema.md#support
"""
# TODO: there are many other information we ignore for now
package.bug_tracking_url = support.get('issues') or None
package.code_view_url = support.get('source') or None
return package
def source_mapper(source, package):
"""
Add vcs_url from source tag, if present. Typically only present in
composer.lock
"""
tool = source.get('type')
if not tool:
return package
url = source.get('url')
if not url:
return package
version = source.get('reference')
package.vcs_url = '{tool}+{url}@{version}'.format(**locals())
return package
def dist_mapper(dist, package):
"""
Add download_url from source tag, if present. Typically only present in
composer.lock
"""
url = dist.get('url')
if not url:
return package
package.download_url = url
return package
def vendor_mapper(package):
"""
Vendor is the first part of the name element.
https://getcomposer.org/doc/04-schema.md#name
"""
if package.namespace:
package.parties.append(
models.Party(type=models.party_person,
name=package.namespace, role='vendor'))
return package
def _deps_mapper(deps, package, scope, is_runtime=False, is_optional=False):
"""
Handle deps such as dependencies, devDependencies
return a tuple of (dep type, list of deps)
https://getcomposer.org/doc/04-schema.md#package-links
"""
for ns_name, requirement in deps.items():
ns, _, name = ns_name.rpartition('/')
purl = models.PackageURL(type='composer', namespace=ns, name=name).to_string()
dep = models.DependentPackage(
purl=purl,
extracted_requirement=requirement,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional)
package.dependencies.append(dep)
return package
def parse_person(persons):
"""
https://getcomposer.org/doc/04-schema.md#authors
A "person" is an object with a "name" field and optionally "url" and "email".
Yield a name, email, url tuple for a person object
A person can be in the form:
"authors": [
{
"name": "Nils Adermann",
"email": "[email protected]",
"homepage": "http://www.naderman.de",
"role": "Developer"
},
{
"name": "Jordi Boggiano",
"email": "[email protected]",
"homepage": "http://seld.be",
"role": "Developer"
}
]
Both forms are equivalent.
"""
if isinstance(persons, list):
for person in persons:
# ensure we have our three values
name = person.get('name')
role = person.get('role')
email = person.get('email')
url = person.get('homepage')
# FIXME: this got cargoculted from npm package.json parsing
yield (
name and name.strip(),
role and role.strip(),
email and email.strip('<> '),
url and url.strip('() '))
else:
raise ValueError('Incorrect PHP composer persons: %(persons)r' % locals())
def build_dep_package(package, scope, is_runtime, is_optional):
return models.DependentPackage(
purl=package.purl,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional,
is_resolved=True,
)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/phpcomposer.py
| 0.430028 | 0.158207 |
phpcomposer.py
|
pypi
|
import io
import json
from packageurl import PackageURL
from packagedcode import models
"""
Handle haxelib Haxe packages
See
- https://lib.haxe.org/all/ this lists all the packages.
- https://lib.haxe.org/documentation/creating-a-haxelib-package/
- https://github.com/HaxeFoundation/haxelib
- https://github.com/gogoprog/hxsocketio/blob/master/haxelib.json
- https://github.com/HaxeFoundation/haxelib/blob/development/haxelib.json
Download and homepage are using these conventions:
- https://lib.haxe.org/p/format/
- https://lib.haxe.org/files/3.0/tweenx-1,0,4.zip
- https://lib.haxe.org/p/format/3.4.1/download/
- https://lib.haxe.org/files/3.0/format-3,4,1.zip
"""
# TODO: Update the license based on a mapping:
# Per the doc:
# Can be GPL, LGPL, BSD, Public (for Public Domain), MIT, or Apache.
class HaxelibJsonHandler(models.DatafileHandler):
datasource_id = 'haxelib_json'
path_patterns = ('*/haxelib.json',)
default_package_type = 'haxe'
default_primary_language = 'Haxe'
description = 'Haxe haxelib.json metadata file'
documentation_url = 'https://lib.haxe.org/documentation/creating-a-haxelib-package/'
@classmethod
def _parse(cls, json_data):
name = json_data.get('name')
version = json_data.get('version')
package_data = models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
name=name,
version=version,
homepage_url=json_data.get('url'),
extracted_license_statement=json_data.get('license'),
keywords=json_data.get('tags'),
description=json_data.get('description'),
primary_language=cls.default_primary_language,
)
if name and version:
download_url = f'https://lib.haxe.org/p/{name}/{version}/download/'
package_data.repository_download_url = download_url
package_data.download_url = download_url
if name:
package_data.repository_homepage_url = f'https://lib.haxe.org/p/{name}'
for contrib in json_data.get('contributors', []):
party = models.Party(
type=models.party_person,
name=contrib,
role='contributor',
url='https://lib.haxe.org/u/{}'.format(contrib))
package_data.parties.append(party)
for dep_name, dep_version in json_data.get('dependencies', {}).items():
dep_version = dep_version and dep_version.strip()
is_resolved = bool(dep_version)
dep_purl = PackageURL(
type=cls.default_package_type,
name=dep_name,
version=dep_version
).to_string()
dep = models.DependentPackage(purl=dep_purl, is_resolved=is_resolved,)
package_data.dependencies.append(dep)
return package_data
@classmethod
def parse(cls, location):
"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package_data archive, manifest or similar.
{
"name": "haxelib",
"url" : "https://lib.haxe.org/documentation/",
"license": "GPL",
"tags": ["haxelib", "core"],
"description": "The haxelib client",
"classPath": "src",
"version": "3.4.0",
"releasenote": " * Fix password input issue in Windows (#421).\n * ....",
"contributors": ["back2dos", "ncannasse", "jason", "Simn", "nadako", "andyli"]
}
"""
with io.open(location, encoding='utf-8') as loc:
json_data = json.load(loc)
yield cls._parse(json_data)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/haxe.py
| 0.477067 | 0.245854 |
haxe.py
|
pypi
|
from contextlib import closing
import pefile
from ftfy import fix_text
from commoncode import text
from packagedcode import models
from packagedcode.models import Party
from packagedcode.models import party_org
from typecode import contenttype
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(
isinstance(a, str) and a or repr(a) for a in args))
"""
Extract data from windows PE DLLs and executable.
Note that the extraction may not be correct for all PE in particular
older legacy PEs. See tests and:
http://msdn.microsoft.com/en-us/library/aa381058%28v=VS.85%29.aspx
PE stores data in a "VarInfo" structure for "variable information".
VarInfo are by definition variable key/value pairs:
http://msdn.microsoft.com/en-us/library/ms646995%28v=vs.85%29.aspx
Therefore we use a list of the most common and useful key names with
an eye on origin and license related information and return a value
when there is one present.
"""
"""
https://docs.microsoft.com/en-us/windows/win32/menurc/versioninfo-resource
Name Description
Comments Additional information that should be displayed for
diagnostic purposes.
CompanyName Company that produced the file?for example, "Microsoft
Corporation" or "Standard Microsystems Corporation, Inc."
This string is required.
FileDescription File description to be presented to users. This string may
be displayed in a list box when the user is choosing files
to install?for example, "Keyboard Driver for AT-Style
Keyboards". This string is required.
FileVersion Version number of the file?for example, "3.10" or
"5.00.RC2". This string is required.
InternalName Internal name of the file, if one exists?for example, a
module name if the file is a dynamic-link library. If the
file has no internal name, this string should be the
original filename, without extension. This string is
required.
LegalCopyright Copyright notices that apply to the file. This should
include the full text of all notices, legal symbols,
copyright dates, and so on. This string is optional.
LegalTrademarks Trademarks and registered trademarks that apply to the file.
This should include the full text of all notices, legal
symbols, trademark numbers, and so on. This string is
optional.
OriginalFilename Original name of the file, not including a path. This
information enables an application to determine whether a
file has been renamed by a user. The format of the name
depends on the file system for which the file was created.
This string is required.
ProductName Name of the product with which the file is distributed. This
string is required.
ProductVersion Version of the product with which the file is
distributed?for example, "3.10" or "5.00.RC2". This string
is required.
"""
# List of common info keys found in PE.
PE_INFO_KEYS = (
'Full Version', # rare and used only by Java exe
'ProductVersion', # the actual version
'FileVersion', # another common version
'Assembly Version', # a version common in MSFT, redundant when present with ProductVersion
'BuildDate', # rare but useful when there 2013/02/04-18:07:46 2018-11-10 14:38
'ProductName', # often present often localized, that's a component name
'OriginalFilename', # name or the original DLL
'InternalName', # often present: sometimes a package name or a .dll or .exe
'License', # rare, seen only in CURL
'LegalCopyright', # copyright notice, sometimes a license tag or URL. Use it for license detection
'LegalTrademarks', # may sometimes contains license or copyright. Ignore a single ".". Treat as part of the declared license
'LegalTrademarks1', # mostly MSFT
'LegalTrademarks2', # mostly MSFT
'LegalTrademarks3', # mostly MSFT
'FileDescription', # description, often localized
'Comments', # random data. Append to a description
'CompanyName', # the company e.g a party, sometimes localized
'Company', # rare, use a fallback if present and CCompanyName missing
'URL', # rarely there but good if there
'WWW', # rarely there but good if there
)
PE_INFO_KEYSET = set(PE_INFO_KEYS)
def pe_info(location):
"""
Return a mapping of common data available for a Windows dll or exe PE
(portable executable).
Return None for non-Windows PE files.
Return an empty mapping for PE from which we could not collect data.
Also collect extra data found if any, returned as a dictionary under the
'extra_data' key in the returned mapping.
"""
if not location:
return {}
result = dict([(k, None,) for k in PE_INFO_KEYS])
extra_data = result['extra_data'] = {}
with closing(pefile.PE(location)) as pe:
if not hasattr(pe, 'FileInfo'):
# No fileinfo section: we return just empties
return result
# >>> pe.FileInfo: this is a list of list of Structure objects:
# [[<Structure: [VarFileInfo] >, <Structure: [StringFileInfo]>]]
file_info = pe.FileInfo
if not file_info or not isinstance(file_info, list):
if TRACE:
logger.debug('pe_info: not file_info')
return result
# here we have a non-empty list
file_info = file_info[0]
if TRACE:
logger.debug('pe_info: file_info:', file_info)
string_file_info = [x for x in file_info
if type(x) == pefile.Structure
and hasattr(x, 'name')
and x.name == 'StringFileInfo']
if not string_file_info:
# No stringfileinfo section: we return just empties
if TRACE:
logger.debug('pe_info: not string_file_info')
return result
string_file_info = string_file_info[0]
if not hasattr(string_file_info, 'StringTable'):
# No fileinfo.StringTable section: we return just empties
if TRACE:
logger.debug('pe_info: not StringTable')
return result
string_table = string_file_info.StringTable
if not string_table or not isinstance(string_table, list):
return result
string_table = string_table[0]
if TRACE:
logger.debug(
'pe_info: Entries keys: ' + str(set(k for k in string_table.entries)))
logger.debug('pe_info: Entry values:')
for k, v in string_table.entries.items():
logger.debug(' ' + str(k) + ': ' + repr(type(v)) + repr(v))
for k, v in string_table.entries.items():
# convert unicode to a safe ASCII representation
key = text.as_unicode(k).strip()
value = text.as_unicode(v).strip()
value = fix_text(value)
if key in PE_INFO_KEYSET:
result[key] = value
else:
extra_data[key] = value
return result
def get_first(mapping, *keys):
"""
Return the first value of the `keys` that is found in the `mapping`.
"""
for key in keys:
value = mapping.get(key)
if value:
return value
def concat(mapping, *keys):
"""
Return a concatenated string of all unique values of the `keys found in the
`mapping`.
"""
values = []
for key in keys:
val = mapping.get(key)
if val and val not in values:
values.append(val)
return '\n'.join(values)
class WindowsExecutableHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'windows_executable'
default_package_type = 'winexe'
filetypes = ('pe32', 'for ms windows',)
path_patterns = (
'*.exe',
'*.dll',
'*.mui',
'*.mun',
'*.com',
'*.winmd',
'*.sys',
'*.tlb',
'*.exe_*',
'*.dll_*',
'*.mui_*',
'*.mun_*',
'*.com_*',
'*.winmd_*',
'*.sys_*',
'*.tlb_*',
'*.ocx',
)
description = 'Windows Portable Executable metadata'
documentation_url = 'https://en.wikipedia.org/wiki/Portable_Executable'
@classmethod
def is_datafile(cls, location, filetypes=tuple()):
"""
Return True if the file at location is highly likely to be a POM.
"""
if super().is_datafile(location, filetypes=filetypes):
return True
T = contenttype.get_type(location)
if T.is_winexe:
return True
@classmethod
def parse(cls, location):
infos = pe_info(location)
version = get_first(
infos,
'Full Version',
'ProductVersion',
'FileVersion',
'Assembly Version',
)
release_date = get_first(infos, 'BuildDate')
if release_date:
if len(release_date) >= 10:
release_date = release_date[:10]
release_date = release_date.replace('/', '-')
name = get_first(
infos,
'ProductName',
'OriginalFilename',
'InternalName',
)
copyr = get_first(infos, 'LegalCopyright')
LegalCopyright = copyr,
LegalTrademarks = concat(
infos,
'LegalTrademarks',
'LegalTrademarks1',
'LegalTrademarks2',
'LegalTrademarks3')
License = get_first(infos, 'License')
extracted_license_statement = {}
if LegalCopyright or LegalTrademarks or License:
extracted_license_statement = dict(
LegalCopyright=copyr,
LegalTrademarks=LegalTrademarks,
License=License
)
description = concat(infos, 'FileDescription', 'Comments')
parties = []
cname = get_first(infos, 'CompanyName', 'Company')
if cname:
parties = [Party(type=party_org, role='author', name=cname)]
homepage_url = get_first(infos, 'URL', 'WWW')
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
name=name,
version=version,
release_date=release_date,
copyright=copyr,
extracted_license_statement=extracted_license_statement,
description=description,
parties=parties,
homepage_url=homepage_url,
)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/win_pe.py
| 0.564339 | 0.180504 |
win_pe.py
|
pypi
|
try:
from license_expression import Licensing
from license_expression import combine_expressions as le_combine_expressions
except:
Licensing = None
le_combine_expressions = None
PLAIN_URLS = (
'https://',
'http://',
)
VCS_URLS = (
'git://',
'git+git://',
'git+https://',
'git+http://',
'hg://',
'hg+http://',
'hg+https://',
'svn://',
'svn+https://',
'svn+http://',
)
# TODO this does not really normalize the URL
# TODO handle vcs_tool
def normalize_vcs_url(repo_url, vcs_tool=None):
"""
Return a normalized vcs_url version control URL given some `repo_url` and an
optional `vcs_tool` hint (such as 'git', 'hg', etc.
Handles shortcuts for GitHub, GitHub gist, Bitbucket, or GitLab repositories
and more using the same approach as npm install:
See https://docs.npmjs.com/files/package.json#repository
or https://getcomposer.org/doc/05-repositories.md
This is done here in npm:
https://github.com/npm/npm/blob/d3c858ce4cfb3aee515bb299eb034fe1b5e44344/node_modules/hosted-git-info/git-host-info.js
These should be resolved:
npm/npm
gist:11081aaa281
bitbucket:example/repo
gitlab:another/repo
expressjs/serve-static
git://github.com/angular/di.js.git
git://github.com/hapijs/boom
[email protected]:balderdashy/waterline-criteria.git
http://github.com/ariya/esprima.git
http://github.com/isaacs/nopt
https://github.com/chaijs/chai
https://github.com/christkv/kerberos.git
https://gitlab.com/foo/private.git
[email protected]:foo/private.git
"""
if not repo_url or not isinstance(repo_url, str):
return
repo_url = repo_url.strip()
if not repo_url:
return
# TODO: If we match http and https, we may should add more check in
# case if the url is not a repo one. For example, check the domain
# name in the url...
if repo_url.startswith(VCS_URLS + PLAIN_URLS):
return repo_url
if repo_url.startswith('git@'):
tool, _, right = repo_url.partition('@')
if ':' in repo_url:
host, _, repo = right.partition(':')
else:
# [email protected]/Filirom1/npm2aur.git
host, _, repo = right.partition('/')
if any(r in host for r in ('bitbucket', 'gitlab', 'github')):
scheme = 'https'
else:
scheme = 'git'
return '%(scheme)s://%(host)s/%(repo)s' % locals()
# FIXME: where these URL schemes come from??
if repo_url.startswith(('bitbucket:', 'gitlab:', 'github:', 'gist:')):
hoster_urls = {
'bitbucket': 'https://bitbucket.org/%(repo)s',
'github': 'https://github.com/%(repo)s',
'gitlab': 'https://gitlab.com/%(repo)s',
'gist': 'https://gist.github.com/%(repo)s', }
hoster, _, repo = repo_url.partition(':')
return hoster_urls[hoster] % locals()
if len(repo_url.split('/')) == 2:
# implicit github, but that's only on NPM?
return f'https://github.com/{repo_url}'
return repo_url
def build_description(summary, description):
"""
Return a description string from a summary and description
"""
summary = (summary or '').strip()
description = (description or '').strip()
if not description:
description = summary
else:
if summary and summary not in description:
description = '\n'.join([summary , description])
return description
_LICENSING = Licensing and Licensing() or None
def combine_expressions(
expressions,
relation='AND',
unique=True,
licensing=_LICENSING,
):
"""
Return a combined license expression string with relation, given a sequence of
license ``expressions`` strings or LicenseExpression objects.
"""
if not licensing:
raise Exception('combine_expressions: cannot combine combine_expressions without license_expression package.')
return expressions and str(le_combine_expressions(expressions, relation, unique, licensing)) or None
def get_ancestor(levels_up, resource, codebase):
"""
Return the nth-``levels_up`` ancestor Resource of ``resource`` in
``codebase`` or None.
For example, with levels_up=2 and starting with a resource path of
`gem-extract/metadata.gz-extract/metadata.gz-extract`,
then `gem-extract/` should be returned.
"""
rounds = 0
while rounds < levels_up:
resource = resource.parent(codebase)
if not resource:
return
rounds += 1
return resource
def find_root_from_paths(paths, resource, codebase):
"""
Return the resource for the root directory of this filesystem or None, given
a ``resource`` in ``codebase`` with a list of possible resource root-
relative ``paths`` (e.g. extending from the root directory we are looking
for).
"""
for path in paths:
if not resource.path.endswith(path):
continue
return find_root_resource(path=path, resource=resource, codebase=codebase)
def find_root_resource(path, resource, codebase):
"""
Return the resource for the root directory of this filesystem or None, given
a ``resource`` in ``codebase`` with a possible resource root-relative
``path`` (e.g. extending from the root directory we are looking for).
"""
if not resource.path.endswith(path):
return
for _seg in path.split('/'):
resource = resource.parent(codebase)
if not resource:
return
return resource
def yield_dependencies_from_package_data(package_data, datafile_path, package_uid):
"""
Yield a Dependency for each dependency from ``package_data.dependencies``
"""
from packagedcode import models
dependent_packages = package_data.dependencies
if dependent_packages:
yield from models.Dependency.from_dependent_packages(
dependent_packages=dependent_packages,
datafile_path=datafile_path,
datasource_id=package_data.datasource_id,
package_uid=package_uid,
)
def yield_dependencies_from_package_resource(resource, package_uid=None):
"""
Yield a Dependency for each dependency from each package from``resource.package_data``
"""
from packagedcode import models
for pkg_data in resource.package_data:
pkg_data = models.PackageData.from_dict(pkg_data)
yield from yield_dependencies_from_package_data(pkg_data, resource.path, package_uid)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/utils.py
| 0.509276 | 0.394551 |
utils.py
|
pypi
|
import io
import json
from commoncode import datautils
from packagedcode import models
import attr
from packageurl import PackageURL
"""
Handle Godeps-like Go package dependency data.
Note: there are other dependency tools for Go beside Godeps, yet several use the
same format. Godeps (and glide, etc.) is mostly legacy today and replaced by Go
modules.
"""
# FIXME: update to use the latest vendor conventions.
# consider other legacy format?
# https://github.com/golang/dep/blob/master/Gopkg.lock
# https://github.com/golang/dep/blob/master/Gopkg.toml
class GodepsHandler(models.NonAssemblableDatafileHandler):
datasource_id = 'godeps'
default_package_type = 'golang'
default_primary_language = 'Go'
path_patterns = ('*/Godeps.json',)
description = 'Go Godeps'
documentation_url = 'https://github.com/tools/godep'
@classmethod
def parse(cls, location):
godeps = Godep(location)
if godeps.import_path:
# we create a purl from the import path to parse ns/name nicely
purl = PackageURL.from_string(f'pkg:golang/{godeps.import_path}')
namespace = purl.namespace
name = purl.name
else:
namespace = None
name = None
dependencies = []
deps = godeps.dependencies or []
for dep in deps:
dependencies.append(
models.DependentPackage(
purl=str(PackageURL.from_string(f'pkg:golang/{dep.import_path}')),
extracted_requirement=dep.revision,
scope='Deps',
is_runtime=True,
is_optional=False,
is_resolved=False,
)
)
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
namespace=namespace,
name=name,
primary_language=cls.default_primary_language,
dependencies=dependencies,
)
@classmethod
def assign_package_to_resources(cls, package, resource, codebase, package_adder):
models.DatafileHandler.assign_package_to_parent_tree(package, resource, codebase, package_adder)
@attr.s
class Dep:
import_path = datautils.String()
revision = datautils.String()
comment = datautils.String()
def to_dict(self):
return attr.asdict(self)
# map of Godep names to our own attribute names
NAMES = {
'ImportPath': 'import_path',
'GoVersion': 'go_version',
'Packages': 'packages',
'Deps': 'dependencies',
'Comment': 'comment',
'Rev': 'revision',
}
@attr.s
class Godep:
"""
Represent JSON dep file with this structure:
type Godeps struct {
ImportPath string
GoVersion string // Abridged output of 'go version'.
Packages []string // Arguments to godep save, if any.
Deps []struct {
ImportPath string
Comment string // Description of commit, if present.
Rev string // VCS-specific commit ID.
}
}
ImportPath
GoVersion
Packages
Deps
ImportPath
Comment
Rev
"""
location = datautils.String()
import_path = datautils.String()
go_version = datautils.String()
packages = datautils.List(item_type=str)
dependencies = datautils.List(item_type=Dep)
def __attrs_post_init__(self, *args, **kwargs):
if self.location:
self.load(self.location)
def load(self, location):
"""
Load self from a location string or a file-like object containing a
Godeps JSON.
"""
with io.open(location, encoding='utf-8') as godep:
text = godep.read()
return self.loads(text)
def loads(self, text):
"""
Load a Godeps JSON text.
"""
data = json.loads(text)
for key, value in data.items():
name = NAMES.get(key)
if name == 'dependencies':
self.dependencies = self.parse_deps(value)
else:
setattr(self, name, value)
return self
def parse_deps(self, deps):
"""
Return a list of Dep from a ``deps`` list of dependency mappings.
"""
deps_list = []
for dep in deps:
data = dict((NAMES[key], value) for key, value in dep.items())
deps_list.append(Dep(**data))
return deps_list or []
def to_dict(self):
return {
'import_path': self.import_path,
'go_version': self.go_version,
'packages': self.packages,
'dependencies': [d.to_dict() for d in self.dependencies],
}
def parse(location):
"""
Return a mapping of parsed Godeps from the file at `location`.
"""
return Godep(location).to_dict()
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/godeps.py
| 0.538741 | 0.242441 |
godeps.py
|
pypi
|
import warnings
import saneyaml
from packageurl import PackageURL
from packagedcode import models
"""
Collect data from Dart pub packages.
See https://dart.dev/tools/pub/pubspec
API has theses URLs:
is limited and only returns all versions of a package
- feeds https://pub.dev/feed.atom
- all packages, paginated: https://pub.dev/api/packages
- one package, all version: https://pub.dev/api/packages/painter
- one version: https://pub.dev/api/packages/painter/versions/0.3.1
See https://github.com/dart-lang/pub/blob/master/doc/repository-spec-v2.md
"""
# FIXME: warnings reported here DO NOT work. We should have a better way
class BaseDartPubspecHandler(models.DatafileHandler):
@classmethod
def assemble(cls, package_data, resource, codebase, package_adder):
datafile_name_patterns = \
DartPubspecYamlHandler.path_patterns + DartPubspecLockHandler.path_patterns
if resource.has_parent():
dir_resource = resource.parent(codebase)
else:
dir_resource = resource
yield from cls.assemble_from_many_datafiles(
datafile_name_patterns=datafile_name_patterns,
directory=dir_resource,
codebase=codebase,
package_adder=package_adder,
)
class DartPubspecYamlHandler(BaseDartPubspecHandler):
datasource_id = 'pubspec_yaml'
path_patterns = ('*pubspec.yaml',)
default_package_type = 'pubspec'
default_primary_language = 'dart'
description = 'Dart pubspec manifest'
documentation_url = 'https://dart.dev/tools/pub/pubspec'
@classmethod
def parse(cls, location):
with open(location) as inp:
pubspec_data = saneyaml.load(inp.read())
package_data = build_package(pubspec_data)
if package_data:
yield package_data
class DartPubspecLockHandler(BaseDartPubspecHandler):
datasource_id = 'pubspec_lock'
path_patterns = ('*pubspec.lock',)
default_package_type = 'pubspec'
default_primary_language = 'dart'
description = 'Dart pubspec lockfile'
documentation_url = 'https://web.archive.org/web/20220330081004/https://gpalma.pt/blog/what-is-the-pubspec-lock/'
@classmethod
def parse(cls, location):
with open(location) as inp:
locks_data = saneyaml.load(inp.read())
dependencies = list(collect_locks(locks_data))
yield models.PackageData(
datasource_id=cls.datasource_id,
type=cls.default_package_type,
primary_language=cls.default_primary_language,
dependencies=dependencies
)
def collect_locks(locks_data):
"""
Yield DependentPackage from locks data
The general form is
packages:
_fe_analyzer_shared:
dependency: transitive
description:
name: _fe_analyzer_shared
url: "https://pub.dartlang.org"
source: hosted
version: "22.0.0"
sdks:
dart: ">=2.12.0 <3.0.0"
"""
# FIXME: we treat all as nno optioanl for now
sdks = locks_data.get('sdks') or {}
for name, version in sdks.items():
dep = build_dep(
name,
version,
scope='sdk',
is_runtime=True,
is_optional=False,
)
yield dep
packages = locks_data.get('packages') or {}
for name, details in packages.items():
version = details.get('version')
# FIXME: see https://github.com/dart-lang/pub/blob/2a08832e0b997ff92de65571b6d79a9b9099faa0/lib/src/lock_file.dart#L344
# transitive, direct main, direct dev, direct overridden.
# they do not map exactly to the pubspec scopes since transitive can be
# either main or dev
scope = details.get('dependency')
if scope == 'direct dev':
is_runtime = False
else:
is_runtime = True
desc = details.get('description') or {}
known_desc = isinstance(desc, dict)
# issue a warning for unknown data structure
warn = False
if not known_desc:
if not (isinstance(desc, str) and desc == 'flutter'):
warn = True
else:
dname = desc.get('name')
durl = desc.get('url')
dsource = details.get('source')
if (
(dname and dname != name)
or (durl and durl != 'https://pub.dartlang.org')
or (dsource and dsource not in ['hosted', 'sdk', ])
):
warn = True
if warn:
warnings.warn(
f'Dart pubspec.locks with unsupported external repo '
f'description or source: {details}',
stacklevel=1,
)
dep = build_dep(
name,
version,
scope=scope,
is_runtime=is_runtime,
is_optional=False,
)
yield dep
def collect_deps(data, dependency_field_name, is_runtime=True, is_optional=False):
"""
Yield DependentPackage found in the ``dependency_field_name`` of ``data``.
Use is_runtime and is_optional in created DependentPackage.
The shape of the data is:
dependencies:
path: 1.7.0
meta: ^1.2.4
yaml: ^3.1.0
environment:
sdk: '>=2.12.0 <3.0.0'
"""
# TODO: these can be more complex for SDKs
# https://dart.dev/tools/pub/dependencies#dependency-sources
dependencies = data.get(dependency_field_name) or {}
for name, version in dependencies.items():
dep = build_dep(
name,
version,
scope=dependency_field_name,
is_runtime=is_runtime,
is_optional=is_optional,
)
yield dep
def build_dep(name, version, scope, is_runtime=True, is_optional=False):
"""
Return DependentPackage from the provided data.
"""
# TODO: these can be more complex for SDKs
# https://dart.dev/tools/pub/dependencies#dependency-sources
if isinstance(version, dict) and 'sdk' in version:
# {'sdk': 'flutter'} type of deps....
# which is a wart that we keep as a requiremnet
version = ', '.join(': '.join([k, str(v)]) for k, v in version.items())
if version.replace('.', '').isdigit():
# version is pinned exactly if it is only made of dots and digits
purl = PackageURL(
type='pubspec',
name=name,
version=version,
)
is_resolved = True
else:
purl = PackageURL(
type='pubspec',
name=name,
)
is_resolved = False
dep = models.DependentPackage(
purl=purl.to_string(),
extracted_requirement=version,
scope=scope,
is_runtime=is_runtime,
is_optional=is_optional,
is_resolved=is_resolved,
)
return dep
def build_package(pubspec_data):
"""
Return a package object from a package data mapping or None
"""
name = pubspec_data.get('name')
version = pubspec_data.get('version')
description = pubspec_data.get('description')
homepage_url = pubspec_data.get('homepage')
extracted_license_statement = pubspec_data.get('license')
vcs_url = pubspec_data.get('repository')
download_url = pubspec_data.get('archive_url')
api_data_url = name and version and f'https://pub.dev/api/packages/{name}/versions/{version}'
repository_homepage_url = name and version and f'https://pub.dev/packages/{name}/versions/{version}'
# A URL should be in the form of:
# https://pub.dartlang.org/packages/url_launcher/versions/6.0.9.tar.gz
# And it may resolve to:
# https://storage.googleapis.com/pub-packages/packages/http-0.13.2.tar.gz
# as seen in the pub.dev web pages
repository_download_url = name and version and f'https://pub.dartlang.org/packages/{name}/versions/{version}.tar.gz'
download_url = download_url or repository_download_url
# Author and authors are deprecated
authors = []
author = pubspec_data.get('author')
if author:
authors.append(author)
authors.extend(pubspec_data.get('authors') or [])
parties = []
for auth in authors:
parties.append(models.Party(
type=models.party_person,
role='author',
name=auth
))
package_dependencies = []
dependencies = collect_deps(
pubspec_data,
'dependencies',
is_runtime=True,
is_optional=False,
)
package_dependencies.extend(dependencies)
dev_dependencies = collect_deps(
pubspec_data,
'dev_dependencies',
is_runtime=False,
is_optional=True,
)
package_dependencies.extend(dev_dependencies)
env_dependencies = collect_deps(
pubspec_data,
'environment',
is_runtime=True,
is_optional=False,
)
package_dependencies.extend(env_dependencies)
extra_data = {}
def add_to_extra_if_present(_key):
_value = pubspec_data.get(_key)
if _value:
extra_data[_key] = _value
add_to_extra_if_present('issue_tracker')
add_to_extra_if_present('documentation')
add_to_extra_if_present('dependencies_overrides')
add_to_extra_if_present('executables')
add_to_extra_if_present('publish_to')
return models.PackageData(
datasource_id=DartPubspecYamlHandler.datasource_id,
type=DartPubspecYamlHandler.default_primary_language,
primary_language=DartPubspecYamlHandler.default_primary_language,
name=name,
version=version,
download_url=download_url,
vcs_url=vcs_url,
description=description,
extracted_license_statement=extracted_license_statement,
parties=parties,
homepage_url=homepage_url,
dependencies=package_dependencies,
extra_data=extra_data,
repository_homepage_url=repository_homepage_url,
api_data_url=api_data_url,
repository_download_url=repository_download_url,
)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/packagedcode/pubspec.py
| 0.525612 | 0.209834 |
pubspec.py
|
pypi
|
import re
from collections import defaultdict
from binascii import crc32
from itertools import islice
from licensedcode.stopwords import STOPWORDS
from textcode.analysis import numbered_text_lines
"""
Utilities to break texts in lines and tokens (aka. words) with specialized
version for queries and rules texts.
"""
def query_lines(
location=None,
query_string=None,
strip=True,
start_line=1,
plain_text=False,
):
"""
Return an iterable of tuples (line number, text line) given a file at
`location` or a `query string`. Include empty lines.
Line numbers start at ``start_line`` which is 1-based by default.
If `plain_text` is True treat the file as a plain text file and do not
attempt to detect its type and extract its content with special procedures.
This is used mostly when loading license texts and rules.
"""
# TODO: OPTIMIZE: tokenizing line by line may be rather slow
# we could instead get lines and tokens at once in a batch?
numbered_lines = []
if location:
numbered_lines = numbered_text_lines(
location,
demarkup=False,
start_line=start_line,
plain_text=plain_text,
)
elif query_string:
if strip:
keepends = False
else:
keepends = True
numbered_lines = enumerate(
query_string.splitlines(keepends),
start_line,
)
for line_number, line in numbered_lines:
if strip:
yield line_number, line.strip()
else:
yield line_number, line.rstrip('\n') + '\n'
# Split on whitespace and punctuations: keep only characters and numbers and +
# when in the middle or end of a word. Keeping the trailing + is important for
# licenses name such as GPL2+. The use a double negation "not non word" meaning
# "words" to define the character ranges
query_pattern = '[^_\\W]+\\+?[^_\\W]*'
word_splitter = re.compile(query_pattern, re.UNICODE).findall
key_phrase_pattern = '(?:' + query_pattern + '|\\{\\{|\\}\\})'
key_phrase_splitter = re.compile(key_phrase_pattern, re.UNICODE).findall
KEY_PHRASE_OPEN = '{{'
KEY_PHRASE_CLOSE = '}}'
# FIXME: this should be folded in a single pass tokenization with the index_tokenizer
def key_phrase_tokenizer(text, stopwords=STOPWORDS):
"""
Yield tokens from a rule ``text`` including key phrases {{brace}} markers.
This tokenizer behaves the same as as the ``index_tokenizer`` returning also
KEY_PHRASE_OPEN and KEY_PHRASE_CLOSE as separate tokens so that they can be
used to parse key phrases.
>>> x = list(key_phrase_splitter('{{AGPL-3.0 GNU Affero License v3.0}}'))
>>> assert x == ['{{', 'AGPL', '3', '0', 'GNU', 'Affero', 'License', 'v3', '0', '}}'], x
>>> x = list(key_phrase_splitter('{{{AGPL{{{{Affero }}License}}0}}'))
>>> assert x == ['{{', 'AGPL', '{{', '{{', 'Affero', '}}', 'License', '}}', '0', '}}'], x
>>> list(index_tokenizer('')) == []
True
>>> x = list(index_tokenizer('{{AGPL-3.0 GNU Affero License v3.0}}'))
>>> assert x == ['agpl', '3', '0', 'gnu', 'affero', 'license', 'v3', '0']
>>> x = list(key_phrase_tokenizer('{{AGPL-3.0 GNU Affero License v3.0}}'))
>>> assert x == ['{{', 'agpl', '3', '0', 'gnu', 'affero', 'license', 'v3', '0', '}}']
"""
if not text:
return
for token in key_phrase_splitter(text.lower()):
if token and token not in stopwords:
yield token
def index_tokenizer(text, stopwords=STOPWORDS):
"""
Return an iterable of tokens from a rule or query ``text`` using index
tokenizing rules. Ignore words that exist as lowercase in the ``stopwords``
set.
For example::
>>> list(index_tokenizer(''))
[]
>>> x = list(index_tokenizer('some Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(index_tokenizer('{{}some }}Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(index_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
>>> assert x == ['hi', 'some', 'text', 'with', 'noth+', 'ing', 'junk', 'spaces']
>>> stops = set(['quot', 'lt', 'gt'])
>>> x = list(index_tokenizer('some "< markup >"', stopwords=stops))
>>> assert x == ['some', 'markup']
"""
if not text:
return []
words = word_splitter(text.lower())
return (token for token in words if token and token not in stopwords)
def index_tokenizer_with_stopwords(text, stopwords=STOPWORDS):
"""
Return a tuple of (tokens, stopwords_by_pos) for a rule
``text`` using index tokenizing rules where tokens is a list of tokens and
stopwords_by_pos is a mapping of {pos: stops count} where "pos" is a token
position and "stops count" is the number of stopword tokens after this
position if any. For stopwords at the start, the position is using the magic
-1 key. Use the lowercase ``stopwords`` set.
For example::
>>> toks, stops = index_tokenizer_with_stopwords('')
>>> assert toks == [], (toks, stops)
>>> assert stops == {}
>>> toks, stops = index_tokenizer_with_stopwords('some Text with spAces! + _ -')
>>> assert toks == ['some', 'text', 'with', 'spaces'], (toks, stops)
>>> assert stops == {}
>>> toks, stops = index_tokenizer_with_stopwords('{{}some }}Text with spAces! + _ -')
>>> assert toks == ['some', 'text', 'with', 'spaces'], (toks, stops)
>>> assert stops == {}
>>> toks, stops = index_tokenizer_with_stopwords('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}')
>>> assert toks == ['hi', 'some', 'text', 'with', 'noth+', 'ing', 'junk', 'spaces'], (toks, stops)
>>> assert stops == {}
>>> stops = set(['quot', 'lt', 'gt'])
>>> toks, stops = index_tokenizer_with_stopwords('some "< markup >"', stopwords=stops)
>>> assert toks == ['some', 'markup'], (toks, stops)
>>> assert stops == {0: 2, 1: 2}
>>> toks, stops = index_tokenizer_with_stopwords('{{g', stopwords=stops)
>>> assert toks == ['g'], (toks, stops)
>>> assert stops == {}
"""
if not text:
return [], {}
tokens = []
tokens_append = tokens.append
# we use a defaultdict as a convenience at construction time
# TODO: use the actual words and not just a count
stopwords_by_pos = defaultdict(int)
pos = -1
for token in word_splitter(text.lower()):
if token:
if token in stopwords:
# If we have not yet started, then all tokens seen so far
# are stopwords and we keep a count of them in the magic
# "-1" position.
stopwords_by_pos[pos] += 1
else:
pos += 1
tokens_append(token)
return tokens, dict(stopwords_by_pos)
def query_tokenizer(text):
"""
Return an iterable of tokens from a unicode query text. Do not ignore stop
words. They are handled at a later stage in a query.
For example::
>>> list(query_tokenizer(''))
[]
>>> x = list(query_tokenizer('some Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(query_tokenizer('{{}some }}Text with spAces! + _ -'))
>>> assert x == ['some', 'text', 'with', 'spaces']
>>> x = list(query_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
>>> assert x == ['hi', 'some', 'text', 'with', 'noth+', 'ing', 'junk', 'spaces']
"""
if not text:
return []
words = word_splitter(text.lower())
return (token for token in words if token)
# Alternate pattern which is the opposite of query_pattern used for
# matched text collection
not_query_pattern = '[_\\W\\s\\+]+[_\\W\\s]?'
# collect tokens and non-token texts in two different groups
_text_capture_pattern = (
'(?P<token>' +
query_pattern +
')' +
'|' +
'(?P<punct>' +
not_query_pattern +
')'
)
tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer
def matched_query_text_tokenizer(text):
"""
Return an iterable of tokens and non-tokens punctuation from a unicode query
text keeping everything (including punctuations, line endings, etc.)
The returned iterable contains 2-tuples of:
- True if the string is a text token or False if this is not
(such as punctuation, spaces, etc).
- the corresponding string.
This is used to reconstruct the matched query text for reporting.
"""
if not text:
return
for match in tokens_and_non_tokens(text):
if match:
mgd = match.groupdict()
token = mgd.get('token')
punct = mgd.get('punct')
if token:
yield True, token
elif punct:
yield False, punct
else:
# this should never happen
raise Exception('Internal error in matched_query_text_tokenizer')
def ngrams(iterable, ngram_length):
"""
Return an iterable of ngrams of length `ngram_length` given an `iterable`.
Each ngram is a tuple of `ngram_length` items.
The returned iterable is empty if the input iterable contains less than
`ngram_length` items.
Note: this is a fairly arcane but optimized way to compute ngrams.
For example:
>>> list(ngrams([1,2,3,4,5], 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 4))
[(1, 2, 3, 4), (2, 3, 4, 5)]
>>> list(ngrams([1,2,3,4], 2))
[(1, 2), (2, 3), (3, 4)]
>>> list(ngrams([1,2,3], 2))
[(1, 2), (2, 3)]
>>> list(ngrams([1,2], 2))
[(1, 2)]
>>> list(ngrams([1], 2))
[]
This also works with arrays or tuples:
>>> from array import array
>>> list(ngrams(array('h', [1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams(tuple([1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
"""
return zip(*(islice(iterable, i, None) for i in range(ngram_length)))
def select_ngrams(ngrams, with_pos=False):
"""
Return an iterable as a subset of a sequence of ngrams using the hailstorm
algorithm. If `with_pos` is True also include the starting position for the
ngram in the original sequence.
Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf
The algorithm first fingerprints every token and then selects a shingle s
if the minimum fingerprint value of all k tokens in s occurs at the first
or the last position of s (and potentially also in between). Due to the
probabilistic properties of Rabin fingerprints the probability that a
shingle is chosen is 2/k if all tokens in the shingle are different.
For example:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]
Positions can also be included. In this case, tuple of (pos, ngram) are returned:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)], with_pos=True))
[(0, (2, 1, 3)), (1, (1, 1, 3)), (2, (5, 1, 3)), (3, (2, 6, 1)), (4, (7, 3, 4))]
This works also from a generator:
>>> list(select_ngrams(x for x in [(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]
"""
last = None
for pos, ngram in enumerate(ngrams):
# FIXME: use a proper hash
nghs = []
for ng in ngram:
if isinstance(ng, str):
ng = bytearray(ng, encoding='utf-8')
else:
ng = bytearray(str(ng).encode('utf-8'))
nghs.append(crc32(ng) & 0xffffffff)
min_hash = min(nghs)
if with_pos:
ngram = (pos, ngram,)
if min_hash in (nghs[0], nghs[-1]):
yield ngram
last = ngram
else:
# always yield the first or last ngram too.
if pos == 0:
yield ngram
last = ngram
if last != ngram:
yield ngram
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/tokenize.py
| 0.533884 | 0.333856 |
tokenize.py
|
pypi
|
ScanCode license detection overview and key design elements
===========================================================
License detection is about finding common texts between the text of a query file
being scanned and the texts of the indexed license texts and rule texts. The process
strives to be correct first and fast second.
Ideally we want to find the best alignment possible between two texts so we know
exactly where they match: the scanned text and one or more of the many license texts.
We settle for good alignments rather than optimal alignments by still returning
accurate and correct matches in a reasonable amount of time.
Correctness is essential but efficiency too: both in terms of speed and memory usage.
One key to efficient matching is to process not characters but whole words and use
internally not strings but integers to represent a word.
Rules and licenses
------------------
The detection uses an index of reference license texts and a set of "rules" that are
common notices or mentions of these licenses. The things that makes detection
sometimes difficult is that a license reference can be very short as in "this is GPL"
or very long as a full license text for the GPLv3. To cope with this we use different
matching strategies and also compute the resemblance and containment of texts that
are matched.
Words as integers
-----------------
A dictionary mapping words to a unique integer is used to transform a scanned text
"query" words and reference indexed license texts and rules words to numbers.
This is possible because we have a limited number of words across all the license
texts (about 15K). We further assign these ids to words such that very common words
have a low id and less common, more discriminant words have a higher id. And define a
thresholds for this ids range such that very common words below that threshold cannot
possible form a license text or mention together.
Once that mapping is applied, the detection then only deal with integers in two
dimensions:
- the token ids (and whether they are in the high or low range).
- their positions in the query (qpos) and the indexed rule (ipos).
We also use an integer id for a rule.
All operations are from then on dealing with list, arrays or sets of integers in
defined ranges.
Matches are reduced to sets of integers we call "Spans":
- matched positions on the query side
- matched positions on the index side
By using integers in known ranges throughout, several operations are reduced to
integer and integer sets or lists comparisons and intersection. These operations
are faster and more readily optimizable.
With integers, we also use less memory:
- we can use arrays of unsigned 16 bits ints that store each number on two bytes
rather than bigger lists of ints.
- we can replace dictionaries by sparse lists or arrays where the index is an integer key.
- we can use succinct, bit level representations (e.g. bitmaps) of integer sets.
Smaller data structures also means faster processing as the processors need to move
less data in memory.
With integers we can also be faster:
- a dict key lookup is slower than a list of array index lookup.
- processing large list of small structures is faster (such as bitmaps, etc).
- we can leverage libraries that speed up integer set operations.
Common/junk tokens
------------------
The quality and speed of detection is supported by classifying each word as either
good/discriminant or common/junk. Junk tokens are either very frequent of tokens that
taken together together cannot form some valid license mention or notice. When a
numeric id is assigned to a token during initial indexing, junk tokens are assigned a
lower id than good tokens. These are then called low or junk tokens and high or good
tokens.
Query processing
----------------
When a file is scanned, it is first converted to a query object which is a list of
integer token ids. A query is further broken down in slices (a.k.a. query runs) based
on heuristics.
While the query is processed a set of matched and matchable positions for for high
and low token ids is kept to track what is left to do in matching.
Matching pipeline
-----------------
The matching pipeline consist of:
- we start with matching the whole query at once against hashes on the whole text
looked up agains a mapping of hash to license rule. We exit if we have a match.
- then we match the whole query for exact matches using an automaton (Aho-Corasick).
We exit if we have a match.
- then each query run is processed in sequence:
- the best potentially matching rules are found with two rounds of approximate
"set" matching. This set matching uses a "bag of words" approach where the
scanned text is transformed in a vector of integers based on the presence or
absence of a word. It is compared against the index of vectors. This is similar
conceptually to a traditional inverted index search for information retrieval.
The best matches are ranked using a resemblance and containment comparison. A
second round is performed on the best matches using multisets which are set where
the number of occurrence of each word is also taken into account. The best matches
are ranked again using a resemblance and containment comparison and is more
accurate than the previous set matching.
- using the ranked potential candidate matches from the two previous rounds, we
then perform a pair-wise local sequence alignment between these candidates and
the query run. This sequence alignment is essentially an optimized diff working
on integer sequences and takes advantage of the fact that some very frequent
words are considered less discriminant: this speeds up the sequence alignment
significantly. The number of multiple local sequence alignments that are required
in this step is also made much smaller by the pre-matching done using sets.
- finally all the collected matches are merged, refined and filtered to yield the
final results. The merging considers the ressemblance, containment and overlap
between scanned texts and the matched texts and several secondary factors.
Filtering is based on the density and length of matches as well as the number of
good or frequent tokens matched.
Last, each match receives a score which is the based on the length of the rule text
and how of this rule text was matched. Optionally we can also collect the exact
matched texts and which part was not match for each match.
Comparison with other tools approaches
--------------------------------------
Most tools use regular expressions. The problem is that creating these expressions
requires a lot of intimate knowledge of the data set and the relation between each
license texts. The maintenance effort is high. And regex matches typically need a
complex second pass of disambiguation for similar matches.
Some tools use an index of pre-defined sentences and match these as regex and then
reassemble possible matches. They tend to suffer from the same issues as a pure regex
based approach and require an intimate knowledge of the license texts and how they
relate to each other.
Some tools use pair-wise comparisons like ScanCode. But in doing so they usually
perform poorly because a multiple local sequence alignment is an expensisve
computation. Say you scan 1000 files and you have 1000 reference texts. You would
need to recursively make multiple times 1000 comparisons with each scanned file very
quickly performing the equivalent 100 million diffs or more to process these files.
Because of the progressive matching pipeline used in ScanCode, sequence alignments
may not be needed at all in the common cases and when they are, only a few are
needed.
See also this list: https://wiki.debian.org/CopyrightReviewTools
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/README.rst
| 0.932699 | 0.982288 |
README.rst
|
pypi
|
from collections.abc import Set
from itertools import count
from itertools import groupby
from intbitset import intbitset
"""
Ranges and intervals of integers using bitmaps.
Used as a compact and faster data structure for token and position sets.
"""
class Span(Set):
"""
Represent ranges of integers (such as tokens positions) as a set of integers.
A Span is hashable and not meant to be modified once created, like a frozenset.
It is equivalent to a sparse closed interval.
Originally derived and heavily modified from Whoosh Span.
"""
def __init__(self, *args):
"""
Create a new Span from a start and end ints or an iterable of ints.
First form:
Span(start int, end int) : the span is initialized with a range(start, end+1)
Second form:
Span(iterable of ints) : the span is initialized with the iterable
Spans are hashable and immutable.
For example:
>>> s = Span(1)
>>> s.start
1
>>> s = Span([1, 2])
>>> s.start
1
>>> s.end
2
>>> s
Span(1, 2)
>>> s = Span(1, 3)
>>> s.start
1
>>> s.end
3
>>> s
Span(1, 3)
>>> s = Span([6, 5, 1, 2])
>>> s.start
1
>>> s.end
6
>>> s
Span(1, 2)|Span(5, 6)
>>> len(s)
4
>>> Span([5, 6, 7, 8, 9, 10 ,11, 12]) == Span([5, 6, 7, 8, 9, 10 ,11, 12])
True
>>> hash(Span([5, 6, 7, 8, 9, 10 ,11, 12])) == hash(Span([5, 6, 7, 8, 9, 10 ,11, 12]))
True
>>> hash(Span([5, 6, 7, 8, 9, 10 ,11, 12])) == hash(Span(5, 12))
True
"""
len_args = len(args)
if len_args == 0:
self._set = intbitset()
elif len_args == 1:
# args0 is a single int or an iterable of ints
if isinstance(args[0], int):
self._set = intbitset(args)
else:
# some sequence or iterable
self._set = intbitset(list(args[0]))
elif len_args == 2:
# args0 and args1 describe a start and end closed range
self._set = intbitset(range(args[0], args[1] + 1))
else:
# args0 is a single int or args is an iterable of ints
# args is an iterable of ints
self._set = intbitset(list(args))
@classmethod
def _from_iterable(cls, it):
return cls(list(it))
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._set)
def __hash__(self):
return hash(tuple(self._set))
def __eq__(self, other):
return isinstance(other, Span) and self._set == other._set
def __and__(self, *others):
return Span(self._set.intersection(*[o._set for o in others]))
def __or__(self, *others):
return Span(self._set.union(*[o._set for o in others]))
def union(self, *others):
"""
Return the union of this span with other spans as a new span.
(i.e. all positions that are in either spans.)
"""
return self.__or__(*others)
def difference(self, *others):
"""
Return the difference of two or more spans as a new span.
(i.e. all positions that are in this span but not the others.)
"""
return Span(self._set.difference(*[o._set for o in others]))
def __repr__(self):
"""
Return a brief representation of this span by only listing contiguous
spans and not all items.
For example:
>>> Span([1, 2, 3, 4, 5, 7, 8, 9, 10])
Span(1, 5)|Span(7, 10)
"""
subspans_repr = []
for subs in self.subspans():
ls = len(subs)
if not ls:
subspans_repr.append('Span()')
elif ls == 1:
subspans_repr.append('Span(%d)' % subs.start)
else:
subspans_repr.append('Span(%d, %d)' % (subs.start, subs.end))
return '|'.join(subspans_repr)
def __contains__(self, other):
"""
Return True if this span contains other span (where other is a Span, an
int or an ints set).
For example:
>>> Span([5, 7]) in Span(5, 7)
True
>>> Span([5, 8]) in Span([5, 7])
False
>>> 6 in Span([4, 5, 6, 7, 8])
True
>>> 2 in Span([4, 5, 6, 7, 8])
False
>>> 8 in Span([4, 8])
True
>>> 5 in Span([4, 8])
False
>>> set([4, 5]) in Span([4, 5, 6, 7, 8])
True
>>> set([9]) in Span([4, 8])
False
"""
if isinstance(other, Span):
return self._set.issuperset(other._set)
if isinstance(other, int):
return self._set.__contains__(other)
if isinstance(other, (set, frozenset)):
return self._set.issuperset(intbitset(other))
if isinstance(other, intbitset):
return self._set.issuperset(other)
@property
def set(self):
return self._set
def issubset(self, other):
return self._set.issubset(other._set)
def issuperset(self, other):
return self._set.issuperset(other._set)
@property
def start(self):
if not self._set:
raise TypeError('Empty Span has no start.')
return self._set[0]
@property
def end(self):
if not self._set:
raise TypeError('Empty Span has no end.')
return self._set[-1]
@classmethod
def sort(cls, spans):
"""
Return a new sorted sequence of spans given a sequence of spans.
The primary sort is on start. The secondary sort is on length.
If two spans have the same start, the longer span will sort first.
For example:
>>> spans = [Span([5, 6, 7, 8, 9, 10]), Span([1, 2]), Span([3, 4, 5]), Span([3, 4, 5, 6]), Span([8, 9, 10])]
>>> Span.sort(spans)
[Span(1, 2), Span(3, 6), Span(3, 5), Span(5, 10), Span(8, 10)]
>>> spans = [Span([1, 2]), Span([3, 4, 5]), Span([3, 4, 5, 6]), Span([8, 9, 10])]
>>> Span.sort(spans)
[Span(1, 2), Span(3, 6), Span(3, 5), Span(8, 10)]
>>> spans = [Span([1, 2]), Span([4, 5]), Span([7, 8]), Span([11, 12])]
>>> Span.sort(spans)
[Span(1, 2), Span(4, 5), Span(7, 8), Span(11, 12)]
>>> spans = [Span([1, 2]), Span([7, 8]), Span([5, 6]), Span([12, 13])]
>>> Span.sort(spans)
[Span(1, 2), Span(5, 6), Span(7, 8), Span(12, 13)]
"""
key = lambda s: (s.start, -len(s),)
return sorted(spans, key=key)
def magnitude(self):
"""
Return the maximal length represented by this span start and end. The
magnitude is the same as the length for a contiguous span. It will be
greater than the length for a span with non-contiguous int items.
An empty span has a zero magnitude.
For example:
>>> Span([4, 8]).magnitude()
5
>>> len(Span([4, 8]))
2
>>> len(Span([4, 5, 6, 7, 8]))
5
>>> Span([4, 5, 6, 14 , 12, 128]).magnitude()
125
>>> Span([4, 5, 6, 7, 8]).magnitude()
5
>>> Span([0]).magnitude()
1
>>> Span([0]).magnitude()
1
"""
if not self._set:
return 0
return self.end - self.start + 1
def density(self):
"""
Return the density of this span as a ratio of its length to its
magnitude, a float between 0 and 1. A dense Span has all its integer
items contiguous and a maximum density of one. A sparse low density span
has some non-contiguous integer items. An empty span has a zero density.
For example:
>>> Span([4, 8]).density()
0.4
>>> Span([4, 5, 6, 7, 8]).density()
1.0
>>> Span([0]).density()
1.0
>>> Span().density()
0
"""
if not self._set:
return 0
return len(self) / self.magnitude()
def overlap(self, other):
"""
Return the count of overlapping items between this span and other span.
For example:
>>> Span([1, 2]).overlap(Span([5, 6]))
0
>>> Span([5, 6]).overlap(Span([5, 6]))
2
>>> Span([4, 5, 6, 7]).overlap(Span([5, 6]))
2
>>> Span([4, 5, 6]).overlap(Span([5, 6, 7]))
2
>>> Span([4, 5, 6]).overlap(Span([6]))
1
>>> Span([4, 5]).overlap(Span([6, 7]))
0
"""
return len(self & other)
def resemblance(self, other):
"""
Return a resemblance coefficient as a float between 0 and 1.
0 means the spans are completely different and 1 identical.
"""
if self._set.isdisjoint(other._set):
return 0
if self._set == other._set:
return 1
resemblance = self.overlap(other) / len(self | other)
return resemblance
def containment(self, other):
"""
Return a containment coefficient as a float between 0 and 1. This is an
indication of how much of the other span is contained in this span.
- 1 means the other span is entirely contained in this span.
- 0 means that the other span is not contained at all this span.
"""
if self._set.isdisjoint(other._set):
return 0
if self._set == other._set:
return 1
containment = self.overlap(other) / len(other)
return containment
def surround(self, other):
"""
Return True if this span surrounds other span.
This is different from containment. A span can surround another span region
and have no positions in common with the surrounded.
For example:
>>> Span([4, 8]).surround(Span([4, 8]))
True
>>> Span([3, 9]).surround(Span([4, 8]))
True
>>> Span([5, 8]).surround(Span([4, 8]))
False
>>> Span([4, 7]).surround(Span([4, 8]))
False
>>> Span([4, 5, 6, 7, 8]).surround(Span([5, 6, 7]))
True
"""
return self.start <= other.start and self.end >= other.end
def is_before(self, other):
return self.end < other.start
def is_after(self, other):
return self.start > other.end
def touch(self, other):
"""
Return True if self sequence is contiguous with other span without overlap.
For example:
>>> Span([5, 7]).touch(Span([5]))
False
>>> Span([5, 7]).touch(Span([5, 8]))
False
>>> Span([5, 7]).touch(Span([7, 8]))
False
>>> Span([5, 7]).touch(Span([8, 9]))
True
>>> Span([8, 9]).touch(Span([5, 7]))
True
"""
return self.start == other.end + 1 or self.end == other.start - 1
def distance_to(self, other):
"""
Return the absolute positive distance from this span to other span.
Overlapping spans have a zero distance.
Non-overlapping touching spans have a distance of one.
For example:
>>> Span([8, 9]).distance_to(Span([5, 7]))
1
>>> Span([5, 7]).distance_to(Span([8, 9]))
1
>>> Span([5, 6]).distance_to(Span([8, 9]))
2
>>> Span([8, 9]).distance_to(Span([5, 6]))
2
>>> Span([5, 7]).distance_to(Span([5, 7]))
0
>>> Span([4, 5, 6]).distance_to(Span([5, 6, 7]))
0
>>> Span([5, 7]).distance_to(Span([10, 12]))
3
>>> Span([1, 2]).distance_to(Span(range(4, 52)))
2
"""
if self.overlap(other):
return 0
if self.touch(other):
return 1
if self.is_before(other):
return other.start - self.end
else:
return self.start - other.end
@staticmethod
def from_ints(ints):
"""
Return a sequence of Spans from an iterable of ints. A new Span is
created for each group of monotonously increasing int items.
>>> Span.from_ints([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
[Span(1, 12)]
>>> Span.from_ints([1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12])
[Span(1, 3), Span(5, 12)]
>>> Span.from_ints([0, 2, 3, 5, 6, 7, 8, 9, 10, 11, 13])
[Span(0), Span(2, 3), Span(5, 11), Span(13)]
"""
ints = sorted(set(ints))
groups = (group for _, group in groupby(ints, lambda group, c=count(): next(c) - group))
return [Span(g) for g in groups]
def subspans(self):
"""
Return a list of Spans creating one new Span for each set of contiguous
integer items.
For example:
>>> span = Span(5, 6, 7, 8, 9, 10) | Span([1, 2]) | Span(3, 5) | Span(3, 6) | Span([8, 9, 10])
>>> span.subspans()
[Span(1, 10)]
When subspans are not touching they do not merge :
>>> span = Span([63, 64]) | Span([58, 58])
>>> span.subspans()
[Span(58), Span(63, 64)]
Overlapping subspans are merged as needed:
>>> span = Span([12, 17, 24]) | Span([15, 16, 17, 35]) | Span(58) | Span(63, 64)
>>> span.subspans()
[Span(12), Span(15, 17), Span(24), Span(35), Span(58), Span(63, 64)]
"""
return Span.from_ints(self)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/spans.py
| 0.91611 | 0.491334 |
spans.py
|
pypi
|
import os
import json
import pathlib
from datetime import datetime
from os.path import dirname
from os.path import join
from distutils.dir_util import copy_tree
import click
import saneyaml
from commoncode.cliutils import MISC_GROUP
from commoncode.cliutils import PluggableCommandLineOption
from jinja2 import Environment, FileSystemLoader
from licensedcode.models import load_licenses
from licensedcode.models import licenses_data_dir
from scancode_config import __version__ as scancode_version
from scancode_config import spdx_license_list_version
TEMPLATES_DIR = os.path.join(dirname(__file__), 'templates')
STATIC_DIR = os.path.join(dirname(__file__), 'static')
def write_file(path, filename, content):
path.joinpath(filename).open("w").write(content)
def now():
return datetime.utcnow().strftime('%Y-%m-%d')
base_context = {
"scancode_version": scancode_version,
"now": now(),
"spdx_license_list_version": spdx_license_list_version,
}
base_context_test = {
"scancode_version": "32.0.0b1",
"now": "Dec 22, 2022",
"spdx_license_list_version": "3.20",
}
def generate_indexes(output_path, environment, licenses, test=False):
"""
Generates the license index and the static website at ``output_path``.
``environment`` is a jinja Environment object used to generate the webpage
and ``licenses`` is a mapping with scancode license data.
"""
if test:
base_context_mapping = base_context_test
else:
base_context_mapping = base_context
static_dest_dir = join(output_path, 'static')
if not os.path.exists(static_dest_dir):
os.makedirs(static_dest_dir)
copy_tree(STATIC_DIR, static_dest_dir)
license_list_template = environment.get_template("license_list.html")
index_html = license_list_template.render(
**base_context_mapping,
licenses=licenses,
)
write_file(output_path, "index.html", index_html)
index = [
{
"license_key": key,
"category": lic.category,
"spdx_license_key": lic.spdx_license_key,
"other_spdx_license_keys": lic.other_spdx_license_keys,
"is_exception": lic.is_exception,
"is_deprecated": lic.is_deprecated,
"json": f"{key}.json",
"yaml": f"{key}.yml",
"html": f"{key}.html",
"license": f"{key}.LICENSE",
}
for key, lic in licenses.items()
]
write_file(
output_path,
"index.json",
json.dumps(index, indent=2, sort_keys=False)
)
write_file(
output_path,
"index.yml",
saneyaml.dump(index, indent=2)
)
return len(index)
def generate_details(output_path, environment, licenses, test=False):
"""
Dumps data at ``output_path`` in JSON, YAML and HTML formats and also dumps
the .LICENSE file with the license text and the data as YAML frontmatter.
``environment`` is a jinja Environment object used to generate the webpage
and ``licenses`` is a mapping with scancode license data.
``test`` is to generate a stable output for testing only
"""
from licensedcode.cache import get_cache
include_builtin = get_cache().has_additional_licenses
if test:
base_context_mapping = base_context_test
else:
base_context_mapping = base_context
license_details_template = environment.get_template("license_details.html")
for lic in licenses.values():
license_data = lic.to_dict(include_text=False, include_builtin=include_builtin)
license_data_with_text = lic.to_dict(include_text=True, include_builtin=include_builtin)
html = license_details_template.render(
**base_context_mapping,
license=lic,
license_data=license_data,
)
write_file(output_path, f"{lic.key}.html", html)
write_file(
output_path,
f"{lic.key}.yml",
saneyaml.dump(license_data_with_text, indent=2)
)
write_file(
output_path,
f"{lic.key}.json",
json.dumps(license_data_with_text, indent=2, sort_keys=False)
)
lic.dump(output_path)
def generate_help(output_path, environment, test=False):
"""
Generate a help.html with help text at ``output_path`` ``environment`` is a
jinja Environment object used to generate the webpage. ``test`` is to
generate a stable output for testing only
"""
if test:
base_context_mapping = base_context_test
else:
base_context_mapping = base_context
template = environment.get_template("help.html")
html = template.render(**base_context_mapping)
write_file(output_path, "help.html", html)
def generate(
build_location,
template_dir=TEMPLATES_DIR,
licenses_data_dir=licenses_data_dir,
test=False,
):
"""
Generate a licenseDB static website and dump license data at
``build_location`` given a license directory ``licenses_data_dir`` using
templates from ``template_dir``. ``test`` is to generate a stable output for
testing only
"""
if not os.path.exists(build_location):
os.makedirs(build_location)
env = Environment(
loader=FileSystemLoader(template_dir),
autoescape=True,
)
licenses = dict(sorted(
load_licenses(licenses_data_dir=licenses_data_dir, with_deprecated=True).items()
))
root_path = pathlib.Path(build_location)
root_path.mkdir(parents=False, exist_ok=True)
count = generate_indexes(output_path=root_path, environment=env, licenses=licenses, test=test)
generate_details(output_path=root_path, environment=env, licenses=licenses, test=test)
generate_help(output_path=root_path, environment=env, test=test)
return count
def scancode_license_data(path):
"""
Dump license data from scancode licenses to the directory ``path`` passed
in from command line.
Dumps data in JSON, YAML and HTML formats and also dumps the .LICENSE file
with the license text and the data as YAML frontmatter.
"""
click.secho(f'Dumping license data to: {path}', err=True)
count = generate(build_location=path)
click.secho(f'Done dumping #{count} licenses.', err=True)
@click.command(name='scancode-license-data')
@click.option(
'--path',
type=click.Path(exists=False, writable=True, file_okay=False, resolve_path=True, path_type=str),
metavar='DIR',
help='Dump the license data in this directory in the LicenseDB format and exit. '
'Creates the directory if it does not exist. ',
help_group=MISC_GROUP,
cls=PluggableCommandLineOption,
)
@click.help_option('-h', '--help')
def dump_scancode_license_data(
path,
*args,
**kwargs,
):
"""
Dump scancode license data in various formats, and the licenseDB static website at `path`.
"""
scancode_license_data(path=path)
if __name__ == '__main__':
dump_scancode_license_data()
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/license_db.py
| 0.499756 | 0.176175 |
license_db.py
|
pypi
|
from collections import namedtuple
import sys
import time
"""
Computes the difference between two texts. Originally based on
Diff Match and Patch
Copyright 2018 The diff-match-patch Authors.
original author [email protected] (Neil Fraser)
https://github.com/google/diff-match-patch
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Changes
2019-05-14: This file has been substantially modified.
All non-diff code has been removed.
Most methods have been moved to plain functions
A new difflib-like match_blocks function has been added
that works from sequences of ints.
"""
TRACE = False
def logger_debug(*args): pass
if TRACE:
import logging
logger = logging.getLogger(__name__)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
Match = namedtuple('Match', 'a b size')
def match_blocks(a, b, a_start, a_end, *args, **kwargs):
"""
Return a list of matching block Match triples describing matching
subsequences of `a` in `b` starting from the `a_start` position in `a` up to
the `a_end` position in `a`.
"""
if TRACE:
logger_debug('a_start', a_start, 'a_end', a_end)
# convert sequences to strings
text1 = int2unicode(a[a_start:a_end])
text2 = int2unicode(b)
df = Differ(timeout=0.01)
diffs = df.difference(text1, text2)
diffs = trim(diffs)
apos = a_start
bpos = 0
matches = []
for op, matched_text in diffs:
size = len(matched_text)
if not size:
continue
if op == DIFF_EQUAL:
matches.append(Match(apos, bpos, size))
apos += size
bpos += size
elif op == DIFF_INSERT:
bpos += size
elif op == DIFF_DELETE:
apos += size
return matches
def int2unicode(nums):
"""
Convert an array of positive integers to a unicode string.
"""
return u''.join(chr(i + 1) for i in nums)
def trim(diffs):
"""
Remove trailing INSERT and DELETE from a list of diffs
"""
# FIXME: this may be best done in the main loop?
while diffs:
op, _ = diffs[-1]
if op in (DIFF_DELETE, DIFF_INSERT):
diffs.pop()
else:
break
return diffs
class Differ(object):
def __init__(self, timeout=0.1):
# Number of seconds to compute a diff before giving up (0 for infinity).
self.timeout = timeout
def difference(self, text1, text2, deadline=None):
"""
Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set timeout instead.
Returns:
Array of changes.
"""
if text1 == None or text2 == None:
raise ValueError('Illegal empty inputs')
# Check for equality (speedup).
if text1 == text2:
if text1:
return [(DIFF_EQUAL, text1)]
return []
# Set a deadline by which time the diff must be complete.
if deadline == None:
# Unlike in most languages, Python counts time in seconds.
if not self.timeout:
deadline = sys.maxsize
else:
deadline = time.time() + self.timeout
# Trim off common prefix (speedup).
commonlength = common_prefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
# Trim off common suffix (speedup).
commonlength = common_suffix(text1, text2)
if commonlength == 0:
commonsuffix = ''
else:
commonsuffix = text1[-commonlength:]
text1 = text1[:-commonlength]
text2 = text2[:-commonlength]
# Compute the diff on the middle block.
diffs = self.compute(text1, text2, deadline)
# Restore the prefix and suffix.
if commonprefix:
diffs[:0] = [(DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((DIFF_EQUAL, commonsuffix))
diffs = merge(diffs)
return diffs
def compute(self, text1, text2, deadline):
"""
Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(DIFF_DELETE, text1)]
len_text1 = len(text1)
len_text2 = len(text2)
reversed_diff = len_text1 > len_text2
if reversed_diff:
longtext, shorttext = text1, text2
len_shorttext = len_text2
else:
shorttext, longtext = text1, text2
len_shorttext = len_text1
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(DIFF_INSERT, longtext[:i]),
(DIFF_EQUAL, shorttext),
(DIFF_INSERT, longtext[i + len_shorttext:])]
# Swap insertions for deletions if diff is reversed.
if reversed_diff:
diffs[0] = (DIFF_DELETE, diffs[0][1])
diffs[2] = (DIFF_DELETE, diffs[2][1])
return diffs
if len_shorttext == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(DIFF_DELETE, text1), (DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = half_match(text1, text2, len_text1, len_text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.difference(text1_a, text2_a, deadline)
diffs_b = self.difference(text1_b, text2_b, deadline)
# Merge the results.
return diffs_a + [(DIFF_EQUAL, mid_common)] + diffs_b
return self.bisect(text1, text2, deadline, len_text1, len_text2)
def bisect(self, text1, text2, deadline, len_text1, len_text2):
"""
Find the 'middle snake' of a diff, split the problem in two
and return the recursively constructed diff.
See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
max_d = (len_text1 + len_text2 + 1) // 2
v_offset = max_d
v_length = 2 * max_d
v1 = [-1] * v_length
v1[v_offset + 1] = 0
v2 = v1[:]
delta = len_text1 - len_text2
# If the total number of characters is odd, then the front path will
# collide with the reverse path.
front = (delta % 2 != 0)
# Offsets for start and end of k loop.
# Prevents mapping of space beyond the grid.
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in range(max_d):
# Bail out if deadline is reached.
if time.time() > deadline:
break
# Walk the front path one step.
for k1 in range(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < len_text1 and y1 < len_text2 and text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > len_text1:
# Ran off the right of the graph.
k1end += 2
elif y1 > len_text2:
# Ran off the bottom of the graph.
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
# Mirror x2 onto top-left coordinate system.
x2 = len_text1 - v2[k2_offset]
if x1 >= x2:
# Overlap detected.
return self.bisect_split(text1, text2, x1, y1, deadline)
# Walk the reverse path one step.
for k2 in range(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < len_text1 and y2 < len_text2 and text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > len_text1:
# Ran off the left of the graph.
k2end += 2
elif y2 > len_text2:
# Ran off the top of the graph.
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
# Mirror x2 onto top-left coordinate system.
x2 = len_text1 - x2
if x1 >= x2:
# Overlap detected.
return self.bisect_split(text1, text2, x1, y1, deadline)
# Diff took too long and hit the deadline or
# number of diffs equals number of characters, no commonality at all.
return [(DIFF_DELETE, text1), (DIFF_INSERT, text2)]
def bisect_split(self, text1, text2, x, y, deadline):
"""
Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.difference(text1a, text2a, deadline)
diffsb = self.difference(text1b, text2b, deadline)
return diffs + diffsb
def half_match(text1, text2, len_text1, len_text2):
"""
Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
"""
reversed_diff = len_text1 > len_text2
if reversed_diff:
longtext, shorttext = text1, text2
len_longtext, len_shorttext = len_text1, len_text2
else:
shorttext, longtext = text1, text2
len_shorttext, len_longtext = len_text1, len_text2
if len_longtext < 4 or len_shorttext * 2 < len_longtext:
# Pointless.
return None
# First check if the second quarter is the seed for a half-match.
hm1 = half_match_i(longtext, shorttext, (len_longtext + 3) // 4, len_longtext)
# Check again based on the third quarter.
hm2 = half_match_i(longtext, shorttext, (len_longtext + 1) // 2, len_longtext)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if reversed_diff:
text1_a, text1_b, text2_a, text2_b, mid_common = hm
else:
text2_a, text2_b, text1_a, text1_b, mid_common = hm
return text1_a, text1_b, text2_a, text2_b, mid_common
def half_match_i(longtext, shorttext, i, len_longtext):
"""
Does a substring of shorttext exist within longtext such that the substring
is at least half the length of longtext?
Args:
longtext: Longer string.
shorttext: Shorter string.
i: Start index of quarter length substring within longtext.
Returns:
Five element Array, containing:
- the prefix of longtext,
- the suffix of longtext,
- the prefix of shorttext,
- the suffix of shorttext
- the common middle.
Or None if there was no match.
"""
seed = longtext[i:i + len_longtext // 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = common_prefix(longtext[i:], shorttext[j:])
suffixLength = common_suffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] + shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len_longtext:
return (
best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b,
best_common)
def cleanup_efficiency(diffs, editcost=4):
"""
Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
# Stack of indices where equalities are found.
equalities = []
# Always equal to diffs[equalities[-1]][1]
last_equality = None
# Index of current position.
pointer = 0
# Is there an insertion operation before the last equality.
pre_ins = False
# Is there a deletion operation before the last equality.
pre_del = False
# Is there an insertion operation after the last equality.
post_ins = False
# Is there a deletion operation after the last equality.
post_del = False
while pointer < len(diffs):
if diffs[pointer][0] == DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < editcost and (post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
last_equality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
last_equality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
# <ins>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if last_equality and (
(pre_ins and pre_del and post_ins and post_del)
or
((len(last_equality) < editcost / 2)
and (pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (DIFF_DELETE, last_equality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (DIFF_INSERT, diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
last_equality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if equalities:
# Throw away the previous equality.
equalities.pop()
if equalities:
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
diffs = merge(diffs)
return diffs
def common_prefix(text1, text2):
"""
Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
# TODO: move as args
len_text1 = len(text1)
len_text2 = len(text2)
pointermax = min(len_text1, len_text2)
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def common_suffix(text1, text2):
"""
Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
# TODO: move as args
len_text1 = len(text1)
len_text2 = len(text2)
pointermax = min(len_text1, len_text2)
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len_text1 - pointerend] == text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def merge(diffs):
"""
Reorder and merge like edit sections in place. Merge equalities.
Any edit section can move as long as it doesn't cross an equality.
Return the merged diffs sequence.
Args:
diffs: Array of diff tuples.
"""
diffs.append((DIFF_EQUAL, '')) # Add a dummy entry at the end.
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete + count_insert > 1:
if count_delete != 0 and count_insert != 0:
# Factor out any common prefixies.
commonlength = common_prefix(text_insert, text_delete)
if commonlength != 0:
x = pointer - count_delete - count_insert - 1
if x >= 0 and diffs[x][0] == DIFF_EQUAL:
diffs[x] = (
diffs[x][0],
diffs[x][1] + text_insert[:commonlength])
else:
diffs.insert(0, (DIFF_EQUAL, text_insert[:commonlength]))
pointer += 1
text_insert = text_insert[commonlength:]
text_delete = text_delete[commonlength:]
# Factor out any common suffixies.
commonlength = common_suffix(text_insert, text_delete)
if commonlength != 0:
diffs[pointer] = (
diffs[pointer][0],
text_insert[-commonlength:] + diffs[pointer][1])
text_insert = text_insert[:-commonlength]
text_delete = text_delete[:-commonlength]
# Delete the offending records and add the merged ones.
new_ops = []
if len(text_delete) != 0:
new_ops.append((DIFF_DELETE, text_delete))
if len(text_insert) != 0:
new_ops.append((DIFF_INSERT, text_insert))
pointer -= count_delete + count_insert
diffs[pointer:pointer + count_delete + count_insert] = new_ops
pointer += len(new_ops) + 1
elif pointer != 0 and diffs[pointer - 1][0] == DIFF_EQUAL:
# Merge this equality with the previous one.
diffs[pointer - 1] = (
diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer][1])
del diffs[pointer]
else:
pointer += 1
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
if diffs[-1][1] == '':
diffs.pop() # Remove the dummy entry at the end.
# Second pass: look for single edits surrounded on both sides by equalities
# which can be shifted sideways to eliminate an equality.
# e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
changes = False
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == DIFF_EQUAL and diffs[pointer + 1][0] == DIFF_EQUAL):
# This is a single edit surrounded by equalities.
if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
# Shift the edit over the previous equality.
if diffs[pointer - 1][1] != "":
diffs[pointer] = (
diffs[pointer][0],
diffs[pointer - 1][1] + diffs[pointer][1][:-len(diffs[pointer - 1][1])])
diffs[pointer + 1] = (
diffs[pointer + 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
del diffs[pointer - 1]
changes = True
elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
# Shift the edit over the next equality.
diffs[pointer - 1] = (
diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
diffs[pointer] = (
diffs[pointer][0],
diffs[pointer][1][len(diffs[pointer + 1][1]):] + diffs[pointer + 1][1])
del diffs[pointer + 1]
changes = True
pointer += 1
# If shifts were made, the diff needs reordering and another shift sweep.
if changes:
diffs = merge(diffs)
return diffs
def levenshtein_distance(diffs):
"""
Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == DIFF_INSERT:
insertions += len(data)
elif op == DIFF_DELETE:
deletions += len(data)
elif op == DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/dmp.py
| 0.721056 | 0.490724 |
dmp.py
|
pypi
|
from time import time
import sys
from licensedcode.match import LicenseMatch
from licensedcode.spans import Span
TRACE = False
TRACE2 = False
TRACE3 = False
def logger_debug(*args): pass
if TRACE or TRACE2 or TRACE3:
use_print = True
if use_print:
prn = print
else:
import logging
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
prn = logger.debug
def logger_debug(*args):
return prn(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Matching strategy using pair-wise multiple local sequences alignment and diff-
like approaches.
"""
MATCH_SEQ = '3-seq'
def match_sequence(idx, rule, query_run, high_postings, start_offset=0,
match_blocks=None, deadline=sys.maxsize):
"""
Return a list of LicenseMatch by matching the `query_run` tokens sequence
starting at `start_offset` against the `idx` index for the candidate `rule`.
Stop processing when reachin the deadline time.
"""
if not rule:
return []
if not match_blocks:
from licensedcode.seq import match_blocks
rid = rule.rid
itokens = idx.tids_by_rid[rid]
len_legalese = idx.len_legalese
qbegin = query_run.start + start_offset
qfinish = query_run.end
qtokens = query_run.query.tokens
query = query_run.query
matches = []
qstart = qbegin
# match as long as long we find alignments and have high matchable tokens
# this allows to find repeated instances of the same rule in the query run
while qstart <= qfinish:
if TRACE2:
logger_debug('\n\nmatch_seq:==========================LOOP=============================')
if not query_run.is_matchable(include_low=False):
break
if TRACE2:
logger_debug('match_seq:running block_matches:', 'a_start:', qstart, 'a_end', qfinish + 1)
block_matches = match_blocks(
a=qtokens, b=itokens, a_start=qstart, a_end=qfinish + 1,
b2j=high_postings, len_good=len_legalese,
matchables=query_run.matchables)
if not block_matches:
break
# create one match for each matching block: they will be further merged
# at LicenseMatch merging and filtering time
for qpos, ipos, mlen in block_matches:
qspan_end = qpos + mlen
# skip single non-high word matched as as sequence
if mlen > 1 or (mlen == 1 and qtokens[qpos] < len_legalese):
qspan = Span(range(qpos, qspan_end))
ispan = Span(range(ipos, ipos + mlen))
hispan = Span(p for p in ispan if itokens[p] < len_legalese)
match = LicenseMatch(
rule, qspan, ispan, hispan, qbegin,
matcher=MATCH_SEQ, query=query)
matches.append(match)
if TRACE2:
from licensedcode.tracing import get_texts
qt, it = get_texts(match)
logger_debug('###########################')
logger_debug(match)
logger_debug('###########################')
logger_debug(qt)
logger_debug('###########################')
logger_debug(it)
logger_debug('###########################')
qstart = max([qstart, qspan_end])
if time() > deadline:
break
if time() > deadline:
break
if TRACE:
logger_debug('match_seq: FINAL LicenseMatch(es)')
for m in matches:
logger_debug(m)
logger_debug('\n\n')
return matches
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/match_seq.py
| 0.510008 | 0.242878 |
match_seq.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from ._compat import PY2
from ._make import NOTHING, Factory, pipe
if not PY2:
import inspect
import typing
__all__ = [
"default_if_none",
"optional",
"pipe",
"to_bool",
]
def optional(converter):
"""
A converter that allows an attribute to be optional. An optional attribute
is one which can be set to ``None``.
Type annotations will be inferred from the wrapped converter's, if it
has any.
:param callable converter: the converter that is used for non-``None``
values.
.. versionadded:: 17.1.0
"""
def optional_converter(val):
if val is None:
return None
return converter(val)
if not PY2:
sig = None
try:
sig = inspect.signature(converter)
except (ValueError, TypeError): # inspect failed
pass
if sig:
params = list(sig.parameters.values())
if params and params[0].annotation is not inspect.Parameter.empty:
optional_converter.__annotations__["val"] = typing.Optional[
params[0].annotation
]
if sig.return_annotation is not inspect.Signature.empty:
optional_converter.__annotations__["return"] = typing.Optional[
sig.return_annotation
]
return optional_converter
def default_if_none(default=NOTHING, factory=None):
"""
A converter that allows to replace ``None`` values by *default* or the
result of *factory*.
:param default: Value to be used if ``None`` is passed. Passing an instance
of `attrs.Factory` is supported, however the ``takes_self`` option
is *not*.
:param callable factory: A callable that takes no parameters whose result
is used if ``None`` is passed.
:raises TypeError: If **neither** *default* or *factory* is passed.
:raises TypeError: If **both** *default* and *factory* are passed.
:raises ValueError: If an instance of `attrs.Factory` is passed with
``takes_self=True``.
.. versionadded:: 18.2.0
"""
if default is NOTHING and factory is None:
raise TypeError("Must pass either `default` or `factory`.")
if default is not NOTHING and factory is not None:
raise TypeError(
"Must pass either `default` or `factory` but not both."
)
if factory is not None:
default = Factory(factory)
if isinstance(default, Factory):
if default.takes_self:
raise ValueError(
"`takes_self` is not supported by default_if_none."
)
def default_if_none_converter(val):
if val is not None:
return val
return default.factory()
else:
def default_if_none_converter(val):
if val is not None:
return val
return default
return default_if_none_converter
def to_bool(val):
"""
Convert "boolean" strings (e.g., from env. vars.) to real booleans.
Values mapping to :code:`True`:
- :code:`True`
- :code:`"true"` / :code:`"t"`
- :code:`"yes"` / :code:`"y"`
- :code:`"on"`
- :code:`"1"`
- :code:`1`
Values mapping to :code:`False`:
- :code:`False`
- :code:`"false"` / :code:`"f"`
- :code:`"no"` / :code:`"n"`
- :code:`"off"`
- :code:`"0"`
- :code:`0`
:raises ValueError: for any other value.
.. versionadded:: 21.3.0
"""
if isinstance(val, str):
val = val.lower()
truthy = {True, "true", "t", "yes", "y", "on", "1", 1}
falsy = {False, "false", "f", "no", "n", "off", "0", 0}
try:
if val in truthy:
return True
if val in falsy:
return False
except TypeError:
# Raised when "val" is not hashable (e.g., lists)
pass
raise ValueError("Cannot convert value to bool: {}".format(val))
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/_vendor/attr/converters.py
| 0.850142 | 0.249207 |
converters.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import operator
import re
from contextlib import contextmanager
from ._config import get_run_validators, set_run_validators
from ._make import _AndValidator, and_, attrib, attrs
from .exceptions import NotCallableError
try:
Pattern = re.Pattern
except AttributeError: # Python <3.7 lacks a Pattern type.
Pattern = type(re.compile(""))
__all__ = [
"and_",
"deep_iterable",
"deep_mapping",
"disabled",
"ge",
"get_disabled",
"gt",
"in_",
"instance_of",
"is_callable",
"le",
"lt",
"matches_re",
"max_len",
"optional",
"provides",
"set_disabled",
]
def set_disabled(disabled):
"""
Globally disable or enable running validators.
By default, they are run.
:param disabled: If ``True``, disable running all validators.
:type disabled: bool
.. warning::
This function is not thread-safe!
.. versionadded:: 21.3.0
"""
set_run_validators(not disabled)
def get_disabled():
"""
Return a bool indicating whether validators are currently disabled or not.
:return: ``True`` if validators are currently disabled.
:rtype: bool
.. versionadded:: 21.3.0
"""
return not get_run_validators()
@contextmanager
def disabled():
"""
Context manager that disables running validators within its context.
.. warning::
This context manager is not thread-safe!
.. versionadded:: 21.3.0
"""
set_run_validators(False)
try:
yield
finally:
set_run_validators(True)
@attrs(repr=False, slots=True, hash=True)
class _InstanceOfValidator(object):
type = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not isinstance(value, self.type):
raise TypeError(
"'{name}' must be {type!r} (got {value!r} that is a "
"{actual!r}).".format(
name=attr.name,
type=self.type,
actual=value.__class__,
value=value,
),
attr,
self.type,
value,
)
def __repr__(self):
return "<instance_of validator for type {type!r}>".format(
type=self.type
)
def instance_of(type):
"""
A validator that raises a `TypeError` if the initializer is called
with a wrong type for this particular attribute (checks are performed using
`isinstance` therefore it's also valid to pass a tuple of types).
:param type: The type to check for.
:type type: type or tuple of types
:raises TypeError: With a human readable error message, the attribute
(of type `attrs.Attribute`), the expected type, and the value it
got.
"""
return _InstanceOfValidator(type)
@attrs(repr=False, frozen=True, slots=True)
class _MatchesReValidator(object):
pattern = attrib()
match_func = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.match_func(value):
raise ValueError(
"'{name}' must match regex {pattern!r}"
" ({value!r} doesn't)".format(
name=attr.name, pattern=self.pattern.pattern, value=value
),
attr,
self.pattern,
value,
)
def __repr__(self):
return "<matches_re validator for pattern {pattern!r}>".format(
pattern=self.pattern
)
def matches_re(regex, flags=0, func=None):
r"""
A validator that raises `ValueError` if the initializer is called
with a string that doesn't match *regex*.
:param regex: a regex string or precompiled pattern to match against
:param int flags: flags that will be passed to the underlying re function
(default 0)
:param callable func: which underlying `re` function to call (options
are `re.fullmatch`, `re.search`, `re.match`, default
is ``None`` which means either `re.fullmatch` or an emulation of
it on Python 2). For performance reasons, they won't be used directly
but on a pre-`re.compile`\ ed pattern.
.. versionadded:: 19.2.0
.. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
"""
fullmatch = getattr(re, "fullmatch", None)
valid_funcs = (fullmatch, None, re.search, re.match)
if func not in valid_funcs:
raise ValueError(
"'func' must be one of {}.".format(
", ".join(
sorted(
e and e.__name__ or "None" for e in set(valid_funcs)
)
)
)
)
if isinstance(regex, Pattern):
if flags:
raise TypeError(
"'flags' can only be used with a string pattern; "
"pass flags to re.compile() instead"
)
pattern = regex
else:
pattern = re.compile(regex, flags)
if func is re.match:
match_func = pattern.match
elif func is re.search:
match_func = pattern.search
elif fullmatch:
match_func = pattern.fullmatch
else: # Python 2 fullmatch emulation (https://bugs.python.org/issue16203)
pattern = re.compile(
r"(?:{})\Z".format(pattern.pattern), pattern.flags
)
match_func = pattern.match
return _MatchesReValidator(pattern, match_func)
@attrs(repr=False, slots=True, hash=True)
class _ProvidesValidator(object):
interface = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.interface.providedBy(value):
raise TypeError(
"'{name}' must provide {interface!r} which {value!r} "
"doesn't.".format(
name=attr.name, interface=self.interface, value=value
),
attr,
self.interface,
value,
)
def __repr__(self):
return "<provides validator for interface {interface!r}>".format(
interface=self.interface
)
def provides(interface):
"""
A validator that raises a `TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
performed using ``interface.providedBy(value)`` (see `zope.interface
<https://zopeinterface.readthedocs.io/en/latest/>`_).
:param interface: The interface to check for.
:type interface: ``zope.interface.Interface``
:raises TypeError: With a human readable error message, the attribute
(of type `attrs.Attribute`), the expected interface, and the
value it got.
"""
return _ProvidesValidator(interface)
@attrs(repr=False, slots=True, hash=True)
class _OptionalValidator(object):
validator = attrib()
def __call__(self, inst, attr, value):
if value is None:
return
self.validator(inst, attr, value)
def __repr__(self):
return "<optional validator for {what} or None>".format(
what=repr(self.validator)
)
def optional(validator):
"""
A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or `list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators.
"""
if isinstance(validator, list):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator)
@attrs(repr=False, slots=True, hash=True)
class _InValidator(object):
options = attrib()
def __call__(self, inst, attr, value):
try:
in_options = value in self.options
except TypeError: # e.g. `1 in "abc"`
in_options = False
if not in_options:
raise ValueError(
"'{name}' must be in {options!r} (got {value!r})".format(
name=attr.name, options=self.options, value=value
)
)
def __repr__(self):
return "<in_ validator with options {options!r}>".format(
options=self.options
)
def in_(options):
"""
A validator that raises a `ValueError` if the initializer is called
with a value that does not belong in the options provided. The check is
performed using ``value in options``.
:param options: Allowed options.
:type options: list, tuple, `enum.Enum`, ...
:raises ValueError: With a human readable error message, the attribute (of
type `attrs.Attribute`), the expected options, and the value it
got.
.. versionadded:: 17.1.0
"""
return _InValidator(options)
@attrs(repr=False, slots=False, hash=True)
class _IsCallableValidator(object):
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not callable(value):
message = (
"'{name}' must be callable "
"(got {value!r} that is a {actual!r})."
)
raise NotCallableError(
msg=message.format(
name=attr.name, value=value, actual=value.__class__
),
value=value,
)
def __repr__(self):
return "<is_callable validator>"
def is_callable():
"""
A validator that raises a `attr.exceptions.NotCallableError` if the
initializer is called with a value for this particular attribute
that is not callable.
.. versionadded:: 19.1.0
:raises `attr.exceptions.NotCallableError`: With a human readable error
message containing the attribute (`attrs.Attribute`) name,
and the value it got.
"""
return _IsCallableValidator()
@attrs(repr=False, slots=True, hash=True)
class _DeepIterable(object):
member_validator = attrib(validator=is_callable())
iterable_validator = attrib(
default=None, validator=optional(is_callable())
)
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.iterable_validator is not None:
self.iterable_validator(inst, attr, value)
for member in value:
self.member_validator(inst, attr, member)
def __repr__(self):
iterable_identifier = (
""
if self.iterable_validator is None
else " {iterable!r}".format(iterable=self.iterable_validator)
)
return (
"<deep_iterable validator for{iterable_identifier}"
" iterables of {member!r}>"
).format(
iterable_identifier=iterable_identifier,
member=self.member_validator,
)
def deep_iterable(member_validator, iterable_validator=None):
"""
A validator that performs deep validation of an iterable.
:param member_validator: Validator to apply to iterable members
:param iterable_validator: Validator to apply to iterable itself
(optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, hash=True)
class _DeepMapping(object):
key_validator = attrib(validator=is_callable())
value_validator = attrib(validator=is_callable())
mapping_validator = attrib(default=None, validator=optional(is_callable()))
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.mapping_validator is not None:
self.mapping_validator(inst, attr, value)
for key in value:
self.key_validator(inst, attr, key)
self.value_validator(inst, attr, value[key])
def __repr__(self):
return (
"<deep_mapping validator for objects mapping {key!r} to {value!r}>"
).format(key=self.key_validator, value=self.value_validator)
def deep_mapping(key_validator, value_validator, mapping_validator=None):
"""
A validator that performs deep validation of a dictionary.
:param key_validator: Validator to apply to dictionary keys
:param value_validator: Validator to apply to dictionary values
:param mapping_validator: Validator to apply to top-level mapping
attribute (optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
return _DeepMapping(key_validator, value_validator, mapping_validator)
@attrs(repr=False, frozen=True, slots=True)
class _NumberValidator(object):
bound = attrib()
compare_op = attrib()
compare_func = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.compare_func(value, self.bound):
raise ValueError(
"'{name}' must be {op} {bound}: {value}".format(
name=attr.name,
op=self.compare_op,
bound=self.bound,
value=value,
)
)
def __repr__(self):
return "<Validator for x {op} {bound}>".format(
op=self.compare_op, bound=self.bound
)
def lt(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number larger or equal to *val*.
:param val: Exclusive upper bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, "<", operator.lt)
def le(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number greater than *val*.
:param val: Inclusive upper bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, "<=", operator.le)
def ge(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number smaller than *val*.
:param val: Inclusive lower bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, ">=", operator.ge)
def gt(val):
"""
A validator that raises `ValueError` if the initializer is called
with a number smaller or equal to *val*.
:param val: Exclusive lower bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, ">", operator.gt)
@attrs(repr=False, frozen=True, slots=True)
class _MaxLengthValidator(object):
max_length = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if len(value) > self.max_length:
raise ValueError(
"Length of '{name}' must be <= {max}: {len}".format(
name=attr.name, max=self.max_length, len=len(value)
)
)
def __repr__(self):
return "<max_len validator for {max}>".format(max=self.max_length)
def max_len(length):
"""
A validator that raises `ValueError` if the initializer is called
with a string or iterable that is longer than *length*.
:param int length: Maximum length of the string or iterable
.. versionadded:: 21.3.0
"""
return _MaxLengthValidator(length)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/_vendor/attr/validators.py
| 0.844569 | 0.261016 |
validators.py
|
pypi
|
from __future__ import absolute_import, division, print_function
from functools import total_ordering
from ._funcs import astuple
from ._make import attrib, attrs
@total_ordering
@attrs(eq=False, order=False, slots=True, frozen=True)
class VersionInfo(object):
"""
A version object that can be compared to tuple of length 1--4:
>>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
True
>>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
True
>>> vi = attr.VersionInfo(19, 2, 0, "final")
>>> vi < (19, 1, 1)
False
>>> vi < (19,)
False
>>> vi == (19, 2,)
True
>>> vi == (19, 2, 1)
False
.. versionadded:: 19.2
"""
year = attrib(type=int)
minor = attrib(type=int)
micro = attrib(type=int)
releaselevel = attrib(type=str)
@classmethod
def _from_version_string(cls, s):
"""
Parse *s* and return a _VersionInfo.
"""
v = s.split(".")
if len(v) == 3:
v.append("final")
return cls(
year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
)
def _ensure_tuple(self, other):
"""
Ensure *other* is a tuple of a valid length.
Returns a possibly transformed *other* and ourselves as a tuple of
the same length as *other*.
"""
if self.__class__ is other.__class__:
other = astuple(other)
if not isinstance(other, tuple):
raise NotImplementedError
if not (1 <= len(other) <= 4):
raise NotImplementedError
return astuple(self)[: len(other)], other
def __eq__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
return us == them
def __lt__(self, other):
try:
us, them = self._ensure_tuple(other)
except NotImplementedError:
return NotImplemented
# Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
# have to do anything special with releaselevel for now.
return us < them
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/_vendor/attr/_version_info.py
| 0.805517 | 0.209389 |
_version_info.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import copy
from ._compat import iteritems
from ._make import NOTHING, _obj_setattr, fields
from .exceptions import AttrsAttributeNotFoundError
def asdict(
inst,
recurse=True,
filter=None,
dict_factory=dict,
retain_collection_types=False,
value_serializer=None,
):
"""
Return the ``attrs`` attribute values of *inst* as a dict.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attrs.Attribute` as the first argument and the
value as the second argument.
:param callable dict_factory: A callable to produce dictionaries from. For
example, to produce ordered dictionaries instead of normal Python
dictionaries, pass in ``collections.OrderedDict``.
:param bool retain_collection_types: Do not convert to ``list`` when
encountering an attribute whose type is ``tuple`` or ``set``. Only
meaningful if ``recurse`` is ``True``.
:param Optional[callable] value_serializer: A hook that is called for every
attribute or dict key/value. It receives the current instance, field
and value and must return the (updated) value. The hook is run *after*
the optional *filter* has been applied.
:rtype: return type of *dict_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.0.0 *dict_factory*
.. versionadded:: 16.1.0 *retain_collection_types*
.. versionadded:: 20.3.0 *value_serializer*
.. versionadded:: 21.3.0 If a dict has a collection for a key, it is
serialized as a tuple.
"""
attrs = fields(inst.__class__)
rv = dict_factory()
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if value_serializer is not None:
v = value_serializer(inst, a, v)
if recurse is True:
if has(v.__class__):
rv[a.name] = asdict(
v,
recurse=True,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
elif isinstance(v, (tuple, list, set, frozenset)):
cf = v.__class__ if retain_collection_types is True else list
rv[a.name] = cf(
[
_asdict_anything(
i,
is_key=False,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
for i in v
]
)
elif isinstance(v, dict):
df = dict_factory
rv[a.name] = df(
(
_asdict_anything(
kk,
is_key=True,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
_asdict_anything(
vv,
is_key=False,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
)
for kk, vv in iteritems(v)
)
else:
rv[a.name] = v
else:
rv[a.name] = v
return rv
def _asdict_anything(
val,
is_key,
filter,
dict_factory,
retain_collection_types,
value_serializer,
):
"""
``asdict`` only works on attrs instances, this works on anything.
"""
if getattr(val.__class__, "__attrs_attrs__", None) is not None:
# Attrs class.
rv = asdict(
val,
recurse=True,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
elif isinstance(val, (tuple, list, set, frozenset)):
if retain_collection_types is True:
cf = val.__class__
elif is_key:
cf = tuple
else:
cf = list
rv = cf(
[
_asdict_anything(
i,
is_key=False,
filter=filter,
dict_factory=dict_factory,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
)
for i in val
]
)
elif isinstance(val, dict):
df = dict_factory
rv = df(
(
_asdict_anything(
kk,
is_key=True,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
_asdict_anything(
vv,
is_key=False,
filter=filter,
dict_factory=df,
retain_collection_types=retain_collection_types,
value_serializer=value_serializer,
),
)
for kk, vv in iteritems(val)
)
else:
rv = val
if value_serializer is not None:
rv = value_serializer(None, None, rv)
return rv
def astuple(
inst,
recurse=True,
filter=None,
tuple_factory=tuple,
retain_collection_types=False,
):
"""
Return the ``attrs`` attribute values of *inst* as a tuple.
Optionally recurse into other ``attrs``-decorated classes.
:param inst: Instance of an ``attrs``-decorated class.
:param bool recurse: Recurse into classes that are also
``attrs``-decorated.
:param callable filter: A callable whose return code determines whether an
attribute or element is included (``True``) or dropped (``False``). Is
called with the `attrs.Attribute` as the first argument and the
value as the second argument.
:param callable tuple_factory: A callable to produce tuples from. For
example, to produce lists instead of tuples.
:param bool retain_collection_types: Do not convert to ``list``
or ``dict`` when encountering an attribute which type is
``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
``True``.
:rtype: return type of *tuple_factory*
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 16.2.0
"""
attrs = fields(inst.__class__)
rv = []
retain = retain_collection_types # Very long. :/
for a in attrs:
v = getattr(inst, a.name)
if filter is not None and not filter(a, v):
continue
if recurse is True:
if has(v.__class__):
rv.append(
astuple(
v,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
)
elif isinstance(v, (tuple, list, set, frozenset)):
cf = v.__class__ if retain is True else list
rv.append(
cf(
[
astuple(
j,
recurse=True,
filter=filter,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(j.__class__)
else j
for j in v
]
)
)
elif isinstance(v, dict):
df = v.__class__ if retain is True else dict
rv.append(
df(
(
astuple(
kk,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(kk.__class__)
else kk,
astuple(
vv,
tuple_factory=tuple_factory,
retain_collection_types=retain,
)
if has(vv.__class__)
else vv,
)
for kk, vv in iteritems(v)
)
)
else:
rv.append(v)
else:
rv.append(v)
return rv if tuple_factory is list else tuple_factory(rv)
def has(cls):
"""
Check whether *cls* is a class with ``attrs`` attributes.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:rtype: bool
"""
return getattr(cls, "__attrs_attrs__", None) is not None
def assoc(inst, **changes):
"""
Copy *inst* and apply *changes*.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't
be found on *cls*.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. deprecated:: 17.1.0
Use `attrs.evolve` instead if you can.
This function will not be removed du to the slightly different approach
compared to `attrs.evolve`.
"""
import warnings
warnings.warn(
"assoc is deprecated and will be removed after 2018/01.",
DeprecationWarning,
stacklevel=2,
)
new = copy.copy(inst)
attrs = fields(inst.__class__)
for k, v in iteritems(changes):
a = getattr(attrs, k, NOTHING)
if a is NOTHING:
raise AttrsAttributeNotFoundError(
"{k} is not an attrs attribute on {cl}.".format(
k=k, cl=new.__class__
)
)
_obj_setattr(new, k, v)
return new
def evolve(inst, **changes):
"""
Create a new instance, based on *inst* with *changes* applied.
:param inst: Instance of a class with ``attrs`` attributes.
:param changes: Keyword changes in the new copy.
:return: A copy of inst with *changes* incorporated.
:raise TypeError: If *attr_name* couldn't be found in the class
``__init__``.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
.. versionadded:: 17.1.0
"""
cls = inst.__class__
attrs = fields(cls)
for a in attrs:
if not a.init:
continue
attr_name = a.name # To deal with private attributes.
init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
if init_name not in changes:
changes[init_name] = getattr(inst, attr_name)
return cls(**changes)
def resolve_types(cls, globalns=None, localns=None, attribs=None):
"""
Resolve any strings and forward annotations in type annotations.
This is only required if you need concrete types in `Attribute`'s *type*
field. In other words, you don't need to resolve your types if you only
use them for static type checking.
With no arguments, names will be looked up in the module in which the class
was created. If this is not what you want, e.g. if the name only exists
inside a method, you may pass *globalns* or *localns* to specify other
dictionaries in which to look up these names. See the docs of
`typing.get_type_hints` for more details.
:param type cls: Class to resolve.
:param Optional[dict] globalns: Dictionary containing global variables.
:param Optional[dict] localns: Dictionary containing local variables.
:param Optional[list] attribs: List of attribs for the given class.
This is necessary when calling from inside a ``field_transformer``
since *cls* is not an ``attrs`` class yet.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class and you didn't pass any attribs.
:raise NameError: If types cannot be resolved because of missing variables.
:returns: *cls* so you can use this function also as a class decorator.
Please note that you have to apply it **after** `attrs.define`. That
means the decorator has to come in the line **before** `attrs.define`.
.. versionadded:: 20.1.0
.. versionadded:: 21.1.0 *attribs*
"""
# Since calling get_type_hints is expensive we cache whether we've
# done it already.
if getattr(cls, "__attrs_types_resolved__", None) != cls:
import typing
hints = typing.get_type_hints(cls, globalns=globalns, localns=localns)
for field in fields(cls) if attribs is None else attribs:
if field.name in hints:
# Since fields have been frozen we must work around it.
_obj_setattr(field, "type", hints[field.name])
# We store the class we resolved so that subclasses know they haven't
# been resolved.
cls.__attrs_types_resolved__ = cls
# Return the class so you can use it as a decorator too.
return cls
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/_vendor/attr/_funcs.py
| 0.874158 | 0.177668 |
_funcs.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import functools
from ._compat import new_class
from ._make import _make_ne
_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
def cmp_using(
eq=None,
lt=None,
le=None,
gt=None,
ge=None,
require_same_type=True,
class_name="Comparable",
):
"""
Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and
``cmp`` arguments to customize field comparison.
The resulting class will have a full set of ordering methods if
at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
:param Optional[callable] eq: `callable` used to evaluate equality
of two objects.
:param Optional[callable] lt: `callable` used to evaluate whether
one object is less than another object.
:param Optional[callable] le: `callable` used to evaluate whether
one object is less than or equal to another object.
:param Optional[callable] gt: `callable` used to evaluate whether
one object is greater than another object.
:param Optional[callable] ge: `callable` used to evaluate whether
one object is greater than or equal to another object.
:param bool require_same_type: When `True`, equality and ordering methods
will return `NotImplemented` if objects are not of the same type.
:param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
See `comparison` for more details.
.. versionadded:: 21.1.0
"""
body = {
"__slots__": ["value"],
"__init__": _make_init(),
"_requirements": [],
"_is_comparable_to": _is_comparable_to,
}
# Add operations.
num_order_functions = 0
has_eq_function = False
if eq is not None:
has_eq_function = True
body["__eq__"] = _make_operator("eq", eq)
body["__ne__"] = _make_ne()
if lt is not None:
num_order_functions += 1
body["__lt__"] = _make_operator("lt", lt)
if le is not None:
num_order_functions += 1
body["__le__"] = _make_operator("le", le)
if gt is not None:
num_order_functions += 1
body["__gt__"] = _make_operator("gt", gt)
if ge is not None:
num_order_functions += 1
body["__ge__"] = _make_operator("ge", ge)
type_ = new_class(class_name, (object,), {}, lambda ns: ns.update(body))
# Add same type requirement.
if require_same_type:
type_._requirements.append(_check_same_type)
# Add total ordering if at least one operation was defined.
if 0 < num_order_functions < 4:
if not has_eq_function:
# functools.total_ordering requires __eq__ to be defined,
# so raise early error here to keep a nice stack.
raise ValueError(
"eq must be define is order to complete ordering from "
"lt, le, gt, ge."
)
type_ = functools.total_ordering(type_)
return type_
def _make_init():
"""
Create __init__ method.
"""
def __init__(self, value):
"""
Initialize object with *value*.
"""
self.value = value
return __init__
def _make_operator(name, func):
"""
Create operator method.
"""
def method(self, other):
if not self._is_comparable_to(other):
return NotImplemented
result = func(self.value, other.value)
if result is NotImplemented:
return NotImplemented
return result
method.__name__ = "__%s__" % (name,)
method.__doc__ = "Return a %s b. Computed by attrs." % (
_operation_names[name],
)
return method
def _is_comparable_to(self, other):
"""
Check whether `other` is comparable to `self`.
"""
for func in self._requirements:
if not func(self, other):
return False
return True
def _check_same_type(self, other):
"""
Return True if *self* and *other* are of the same type, False otherwise.
"""
return other.value.__class__ is self.value.__class__
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/licensedcode/_vendor/attr/_cmp.py
| 0.847021 | 0.336876 |
_cmp.py
|
pypi
|
def get_relative_path(root_path, path):
"""
Return a path relativefrom the posix 'path' relative to a
base path of `len_base_path` length where the base is a directory if
`base_is_dir` True or a file otherwise.
"""
return path[len(root_path):].lstrip('/')
LEGAL_STARTS_ENDS = (
'copying',
'copyright',
'copyrights',
'copyleft',
'notice',
'license',
'licenses',
'licence',
'licences',
'licensing',
'licencing',
'legal',
'eula',
'agreement',
'copyleft',
'patent',
'patents',
)
_MANIFEST_ENDS = {
'.about': 'ABOUT file',
'/bower.json': 'bower',
'/project.clj': 'clojure',
'.podspec': 'cocoapod',
'/composer.json': 'composer',
'/description': 'cran',
'/elm-package.json': 'elm',
'/+compact_manifest': 'freebsd',
'+manifest': 'freebsd',
'.gemspec': 'gem',
'/metadata': 'gem',
# the extracted metadata of a gem archive
'/metadata.gz-extract': 'gem',
'/build.gradle': 'gradle',
'/project.clj': 'clojure',
'.pom': 'maven',
'/pom.xml': 'maven',
'.cabal': 'haskell',
'/haxelib.json': 'haxe',
'/package.json': 'npm',
'.nuspec': 'nuget',
'.pod': 'perl',
'/meta.yml': 'perl',
'/dist.ini': 'perl',
'/pipfile': 'pypi',
'/setup.cfg': 'pypi',
'/setup.py': 'pypi',
'/PKG-INFO': 'pypi',
'/pyproject.toml': 'pypi',
'.spec': 'rpm',
'/cargo.toml': 'rust',
'.spdx': 'spdx',
'/dependencies': 'generic',
# note that these two cannot be top-level for now
'debian/copyright': 'deb',
'meta-inf/manifest.mf': 'maven',
# TODO: Maven also has sometimes a pom under META-INF/
# 'META-INF/manifest.mf': 'JAR and OSGI',
}
MANIFEST_ENDS = tuple(_MANIFEST_ENDS)
README_STARTS_ENDS = (
'readme',
)
def check_resource_name_start_and_end(resource, STARTS_ENDS):
"""
Return True if `resource.name` or `resource.base_name` begins or ends with
an element of `STARTS_ENDS`
"""
name = resource.name.lower()
base_name = resource.base_name.lower()
return (
name.startswith(STARTS_ENDS)
or name.endswith(STARTS_ENDS)
or base_name.startswith(STARTS_ENDS)
or base_name.endswith(STARTS_ENDS)
)
def set_classification_flags(resource,
_LEGAL=LEGAL_STARTS_ENDS,
_MANIF=MANIFEST_ENDS,
_README=README_STARTS_ENDS,
):
"""
Set classification flags on the `resource` Resource
"""
path = resource.path.lower()
resource.is_legal = is_legal = check_resource_name_start_and_end(resource, _LEGAL)
resource.is_readme = is_readme = check_resource_name_start_and_end(resource, _README)
# FIXME: this will never be picked up as this is NOT available in a pre-scan plugin
has_package_data = bool(getattr(resource, 'package_data', False))
resource.is_manifest = is_manifest = path.endswith(_MANIF) or has_package_data
resource.is_key_file = (resource.is_top_level and (is_readme or is_legal or is_manifest))
return resource
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/summarycode/classify.py
| 0.479016 | 0.190611 |
classify.py
|
pypi
|
from collections import defaultdict
import attr
import click
from commoncode.fileset import get_matches as get_fileset_matches
from plugincode.pre_scan import PreScanPlugin
from plugincode.pre_scan import pre_scan_impl
from commoncode.cliutils import PluggableCommandLineOption
from commoncode.cliutils import PRE_SCAN_GROUP
# Tracing flag
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Assign a facet to a file.
A facet is defined by zero or more glob/fnmatch expressions. Multiple facets can
be assigned to a file. The facets definition is a list of (facet, pattern) and a
file is assigned all the facets that have a pattern defintion that match their
path.
Once all files have been assigned a facet, files without a facet are assigned to
the core facet.
The known facets are:
- core - core files of a package. Used as default if no other facet apply.
- data - data files of a package (such as CSV, etc).
- dev - files used at development time (e.g. build scripts, dev tools, etc)
- docs - Documentation files.
- examples - Code example files.
- tests - Test files and tools.
- thirdparty - Embedded code from a third party (aka. vendored or bundled)
See also https://github.com/clearlydefined/clearlydefined/blob/8f58a9a216cf7c129fe2cf6abe1cc6f960535e0b/docs/clearly.md#facets
"""
FACET_CORE = 'core'
FACET_DEV = 'dev'
FACET_TESTS = 'tests'
FACET_DOCS = 'docs'
FACET_DATA = 'data'
FACET_EXAMPLES = 'examples'
FACETS = (
FACET_CORE,
FACET_DEV,
FACET_TESTS,
FACET_DOCS,
FACET_DATA,
FACET_EXAMPLES,
)
def validate_facets(ctx, param, value):
"""
Return the facets if valid or raise a UsageError otherwise.
Validate facets values against the list of known facets.
"""
if not value:
return
_facet_patterns, invalid_facet_definitions = build_facets(value)
if invalid_facet_definitions:
known_msg = ', '.join(FACETS)
uf = '\n'.join(sorted(' ' + x for x in invalid_facet_definitions))
msg = ('Invalid --facet option(s):\n'
'{uf}\n'
'Valid <facet> values are: {known_msg}.\n'.format(**locals()))
raise click.UsageError(msg)
return value
@pre_scan_impl
class AddFacet(PreScanPlugin):
"""
Assign one or more "facet" to each file (and NOT to directories). Facets are
a way to qualify that some part of the scanned code may be core code vs.
test vs. data, etc.
"""
resource_attributes = dict(facets=attr.ib(default=attr.Factory(list), repr=False))
run_order = 20
sort_order = 20
options = [
PluggableCommandLineOption(('--facet',),
multiple=True,
metavar='<facet>=<pattern>',
callback=validate_facets,
help='Add the <facet> to files with a path matching <pattern>.',
help_group=PRE_SCAN_GROUP,
sort_order=80,
)
]
def is_enabled(self, facet, **kwargs):
if TRACE:
logger_debug('is_enabled: facet:', facet)
return bool(facet)
def process_codebase(self, codebase, facet=(), **kwargs):
"""
Add facets to file resources using the `facet` definition of facets.
Each entry in the `facet` sequence is a string as in <facet>:<pattern>
"""
if not facet:
return
facet_definitions, _invalid_facet_definitions = build_facets(facet)
if TRACE:
logger_debug('facet_definitions:', facet_definitions)
# Walk the codebase and set the facets for each file (and only files)
for resource in codebase.walk(topdown=True):
if not resource.is_file:
continue
facets = compute_path_facets(resource.path, facet_definitions)
if facets:
resource.facets = facets
else:
resource.facets = [FACET_CORE]
resource.save(codebase)
def compute_path_facets(path, facet_definitions):
"""
Return a sorted list of unique facet strings for `path` using the
`facet_definitions` mapping of {pattern: [facet, facet]}.
"""
if not path or not path.strip() or not facet_definitions:
return []
facets = set()
for matches in get_fileset_matches(path, facet_definitions, all_matches=True):
facets.update(matches)
return sorted(facets)
def build_facets(facets, known_facet_names=FACETS):
"""
Return:
- a mapping for facet patterns to a list of unique facet names as
{pattern: [facet, facet, ...]}
- a sorted list of error messages for invalid or unknown facet definitions
found in `facets`.
The `known` facets set of known facets is used for validation.
"""
invalid_facet_definitions = set()
facet_patterns = defaultdict(list)
for facet_def in facets:
facet, _, pattern = facet_def.partition('=')
facet = facet.strip().lower()
pattern = pattern.strip()
if not pattern:
invalid_facet_definitions.add(
'missing <pattern> in "{facet_def}".'.format(**locals()))
continue
if not facet:
invalid_facet_definitions.add(
'missing <facet> in "{facet_def}".'.format(**locals()))
continue
if facet not in known_facet_names:
invalid_facet_definitions.add(
'unknown <facet> in "{facet_def}".'.format(**locals()))
continue
facets = facet_patterns[pattern]
if facet not in facets:
facet_patterns[pattern].append(facet)
return facet_patterns, sorted(invalid_facet_definitions)
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/summarycode/facet.py
| 0.681833 | 0.260848 |
facet.py
|
pypi
|
def get_resource_summary(resource, key, as_attribute=False):
"""
Return the "summary" value as mapping for the `key` summary attribute of a
resource.
This is collected either from a direct Resource.summary attribute if
`as_attribute` is True or as a Resource.extra_data summary item otherwise.
"""
if as_attribute:
summary = resource.summary
else:
summary = resource.extra_data.get('summary', {})
summary = summary or {}
return summary.get(key) or None
def set_resource_summary(resource, key, value, as_attribute=False):
"""
Set `value` as the "summary" value for the `key` summary attribute of a
resource
This is set either in a direct Resource.summary attribute if `as_attribute`
is True or as a Resource.extra_data summary item otherwise.
"""
if as_attribute:
resource.summary[key] = value
else:
summary = resource.extra_data.get('summary')
if not summary:
summary = dict([(key, value)])
resource.extra_data['summary'] = summary
summary[key] = value
def sorted_counter(counter):
"""
Return a list of ordered mapping of {value:val, count:cnt} built from a
`counter` mapping of {value: count} and sortedd by decreasing count then by
value.
"""
def by_count_value(value_count):
value, count = value_count
return -count, value or ''
summarized = [
dict([('value', value), ('count', count)])
for value, count in sorted(counter.items(), key=by_count_value)]
return summarized
def get_resource_tallies(resource, key, as_attribute=False):
"""
Return the "tallies" value as mapping for the `key` tallies attribute of a
resource.
This is collected either from a direct Resource.tallies attribute if
`as_attribute` is True or as a Resource.extra_data tallies item otherwise.
"""
if as_attribute:
tallies = resource.tallies
else:
tallies = resource.extra_data.get('tallies', {})
tallies = tallies or {}
return tallies.get(key) or None
def set_resource_tallies(resource, key, value, as_attribute=False):
"""
Set `value` as the "tallies" value for the `key` tallies attribute of a
resource
This is set either in a direct Resource.tallies attribute if `as_attribute`
is True or as a Resource.extra_data tallies item otherwise.
"""
if as_attribute:
resource.tallies[key] = value
else:
tallies = resource.extra_data.get('tallies')
if not tallies:
tallies = dict([(key, value)])
resource.extra_data['tallies'] = tallies
tallies[key] = value
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/summarycode/utils.py
| 0.874319 | 0.429609 |
utils.py
|
pypi
|
from collections import defaultdict
import re
import attr
import fingerprints
from text_unidecode import unidecode
from cluecode.copyrights import CopyrightDetector
from commoncode.text import toascii
from summarycode.utils import sorted_counter
from summarycode.utils import get_resource_tallies
from summarycode.utils import set_resource_tallies
# Tracing flags
TRACE = False
TRACE_FP = False
TRACE_DEEP = False
TRACE_TEXT = False
TRACE_CANO = False
def logger_debug(*args):
pass
if TRACE or TRACE_CANO:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
# TODO: keep the original order of statements as much as possible
def copyright_tallies(resource, children, keep_details=False):
return build_tallies(
resource=resource,
children=children,
attributes_list='copyrights',
attribute_value='copyright',
tallier=tally_copyrights,
keep_details=keep_details
)
def holder_tallies(resource, children, keep_details=False):
return build_tallies(
resource=resource,
children=children,
attributes_list='holders',
attribute_value='holder',
tallier=tally_persons,
keep_details=keep_details
)
def author_tallies(resource, children, keep_details=False):
return build_tallies(
resource=resource,
children=children,
attributes_list='authors',
attribute_value='author',
tallier=tally_persons,
keep_details=keep_details
)
def build_tallies(
resource,
children,
attributes_list,
attribute_value,
tallier,
keep_details=False,
):
"""
Update the ``resource`` Resource with a tally of scan fields from itself and its
``children``.
Resources and this for the `attributes_list` values list key (such as
copyrights, etc) and the ``attribute_value`` details key (such as copyright).
- `attributes_list` is the name of the attribute values list
('copyrights', 'holders' etc.)
- `attribute_value` is the name of the attribute value key in this list
('copyright', 'holder' etc.)
- `tallier` is a function that takes a list of texts and returns
texts with counts
"""
# Collect current data
values = getattr(resource, attributes_list, [])
no_detection_counter = 0
if values:
# keep current data as plain strings
candidate_texts = [entry.get(attribute_value) for entry in values]
else:
candidate_texts = []
if resource.is_file:
no_detection_counter += 1
# Collect direct children existing summaries
for child in children:
child_summaries = get_resource_tallies(
child,
key=attributes_list,
as_attribute=keep_details
) or []
for child_summary in child_summaries:
count = child_summary['count']
value = child_summary['value']
if value:
candidate_texts.append(Text(value, value, count))
else:
no_detection_counter += count
# summarize proper using the provided function
tallied = tallier(candidate_texts)
# add back the counter of things without detection
if no_detection_counter:
tallied.update({None: no_detection_counter})
tallied = sorted_counter(tallied)
if TRACE:
logger_debug('COPYRIGHT tallied:', tallied)
set_resource_tallies(
resource,
key=attributes_list,
value=tallied,
as_attribute=keep_details,
)
return tallied
# keep track of an original text value and the corresponding clustering "key"
@attr.attributes(slots=True)
class Text(object):
# cleaned, normalized, clustering text for a copyright holder
key = attr.attrib()
# original text for a copyright holder
original = attr.attrib()
# count of occurences of a text
count = attr.attrib(default=1)
def normalize(self):
if TRACE_TEXT:
logger_debug('Text.normalize:', self)
key = self.key.lower()
key = ' '.join(key.split())
key = key.strip('.,').strip()
key = clean(key)
self.key = key.strip('.,').strip()
def transliterate(self):
self.key = toascii(self.key, translit=True)
def fingerprint(self):
key = self.key
if not isinstance(key, str):
key = unidecode(key)
fp = fingerprints.generate(key)
if TRACE_TEXT or TRACE_FP:
logger_debug('Text.fingerprint:key: ', repr(self.key))
logger_debug('Text.fingerprint:fp : ', fingerprints.generate(unidecode(self.key)))
self.key = fp
def tally_copyrights(texts, _detector=CopyrightDetector()):
"""
Return a list of mapping of {value:string, count:int} given a
list of copyright strings or Text() objects.
"""
texts_to_tally = []
no_detection_counter = 0
for text in texts:
if not text:
no_detection_counter += 1
continue
# Keep Text objects as-is
if isinstance(text, Text):
texts_to_tally.append(text)
else:
# FIXME: redetect to strip year should not be needed!!
statements_without_years = _detector.detect(
[(1, text)],
include_copyrights=True,
include_holders=False,
include_authors=False,
include_copyright_years=False,
)
for detection in statements_without_years:
copyr = detection.copyright
texts_to_tally.append(Text(copyr, copyr))
counter = tally(texts_to_tally)
if no_detection_counter:
counter[None] = no_detection_counter
return counter
def tally_persons(texts):
"""
Return a list of mapping of {value:string, count:int} given a
list of holders strings or Text() objects.
"""
texts_to_tally = []
no_detection_counter = 0
for text in texts:
if not text:
no_detection_counter += 1
continue
# Keep Text objects as-is
if isinstance(text, Text):
texts_to_tally.append(text)
else:
cano = canonical_holder(text)
texts_to_tally.append(Text(cano, cano))
counter = tally(texts_to_tally)
if no_detection_counter:
counter[None] = no_detection_counter
return counter
def tally(summary_texts):
"""
Return a mapping of {value: count} given a list of Text objects
(representing either copyrights, holders or authors).
"""
if TRACE:
logger_debug('summarize: INITIAL texts:')
for s in summary_texts:
logger_debug(' ', s)
for text in summary_texts:
text.normalize()
if TRACE_DEEP:
logger_debug('summarize: NORMALIZED 1 texts:')
for s in summary_texts:
logger_debug(' ', s)
texts = list(filter_junk(summary_texts))
if TRACE_DEEP:
logger_debug('summarize: DEJUNKED texts:')
for s in summary_texts:
logger_debug(' ', s)
for t in texts:
t.normalize()
if TRACE_DEEP:
logger_debug('summarize: NORMALIZED 2 texts:')
for s in summary_texts:
logger_debug(' ', s)
# keep non-empties
texts = list(t for t in texts if t.key)
if TRACE_DEEP:
logger_debug('summarize: NON-EMPTY 1 texts:')
for s in summary_texts:
logger_debug(' ', s)
# convert to plain ASCII, then fingerprint
for t in texts:
t.transliterate()
if TRACE_DEEP:
logger_debug('summarize: ASCII texts:')
for s in summary_texts:
logger_debug(' ', s)
for t in texts:
t.fingerprint()
if TRACE_DEEP or TRACE_FP:
logger_debug('summarize: FINGERPRINTED texts:')
for s in summary_texts:
logger_debug(' ', s)
# keep non-empties
texts = list(t for t in texts if t.key)
if TRACE_DEEP:
logger_debug('summarize: NON-EMPTY 2 texts:')
for s in summary_texts:
logger_debug(' ', s)
# cluster
clusters = cluster(texts)
if TRACE_DEEP:
clusters = list(clusters)
logger_debug('summarize: CLUSTERS:')
for c in clusters:
logger_debug(' ', c)
counter = {text.original: count for text, count in clusters}
if TRACE:
logger_debug('summarize: FINAL SUMMARIZED:')
for c in counter:
logger_debug(' ', c)
return counter
def cluster(texts):
"""
Given a `texts` iterable of Text objects, group these objects when they have the
same key. Yield a tuple of (Text object, count of its occurences).
"""
clusters = defaultdict(list)
for text in texts:
clusters[text.key].append(text)
for cluster_key, cluster_texts in clusters.items():
try:
# keep the longest as the representative value for a cluster
cluster_texts.sort(key=lambda x:-len(x.key))
representative = cluster_texts[0]
count = sum(t.count for t in cluster_texts)
if TRACE_DEEP:
logger_debug('cluster: representative, count', representative, count)
yield representative, count
except Exception as e:
msg = (
f'Error in cluster(): cluster_key: {cluster_key!r}, '
f'cluster_texts: {cluster_texts!r}\n'
)
import traceback
msg += traceback.format_exc()
raise Exception(msg) from e
def clean(text):
"""
Return an updated and cleaned Text object from a `text` Text object
normalizing some pucntuations around some name and acronyms.
"""
if not text:
return text
text = text.replace('A. M.', 'A.M.')
return text
# set of common prefixes that can be trimmed from a name
prefixes = frozenset([
'his',
'by',
'from',
'and',
'of',
'for',
'<p>',
])
def strip_prefixes(s, prefixes=prefixes):
"""
Return the `s` string with any of the string in the `prefixes` set
striped from the left. Normalize and strip spaces.
For example:
>>> s = 'by AND for the Free Software Foundation'
>>> strip_prefixes(s) == 'the Free Software Foundation'
True
"""
s = s.split()
while s and s[0].lower().strip().strip('.,') in prefixes:
s = s[1:]
return ' '.join(s)
# set of suffixes that can be stripped from a name
suffixes = frozenset([
'(minizip)',
])
def strip_suffixes(s, suffixes=suffixes):
"""
Return the `s` string with any of the string in the `suffixes` set
striped from the right. Normalize and strip spaces.
For example:
>>> s = 'RedHat Inc corp'
>>> strip_suffixes(s, set(['corp'])) == 'RedHat Inc'
True
"""
s = s.split()
while s and s[-1].lower().strip().strip('.,') in suffixes:
s = s[:-1]
return u' '.join(s)
# TODO: we need a gazeteer of places and or use usaddress and probablepeople or
# refine the POS tagging to catch these better
JUNK_HOLDERS = frozenset([
'advanced computing',
'inc',
'llc',
'ltd',
'berlin',
'munich',
'massachusetts',
'maynard',
'cambridge',
'norway',
'and',
'is',
'a',
'cedar rapids',
'iowa',
'u.s.a',
'u.s.a.',
'usa',
'source code',
'mountain view',
'england',
'web applications',
'menlo park',
'california',
'irvine',
'pune',
'india',
'stockholm',
'sweden',
'sweden)',
'software',
'france',
'concord',
'date here',
'software',
'not',
])
def filter_junk(texts):
"""
Filter junk from an iterable of texts objects.
"""
for text in texts:
if not text.key:
continue
if text.key.lower() in JUNK_HOLDERS:
continue
if text.key.isdigit():
continue
if len(text.key) == 1:
continue
yield text
# Mapping of commonly abbreviated names to their expanded, canonical forms.
# This is mostly of use when these common names show as holders without their
# proper company suffix
COMMON_NAMES = {
'3dfxinteractiveinc.': '3dfx Interactive',
'cern': 'CERN - European Organization for Nuclear Research',
'ciscosystemsinc': 'Cisco Systems',
'ciscosystems': 'Cisco Systems',
'cisco': 'Cisco Systems',
'daisy': 'Daisy',
'daisyltd': 'Daisy',
'fsf': 'Free Software Foundation',
'freesoftwarefoundation': 'Free Software Foundation',
'freesoftwarefoundationinc': 'Free Software Foundation',
'thefreesoftwarefoundation': 'Free Software Foundation',
'thefreesoftwarefoundationinc': 'Free Software Foundation',
'hp': 'Hewlett-Packard',
'hewlettpackard': 'Hewlett-Packard',
'hewlettpackardco': 'Hewlett-Packard',
'hpcompany': 'Hewlett-Packard',
'hpdevelopmentcompanylp': 'Hewlett-Packard',
'hpdevelopmentcompany': 'Hewlett-Packard',
'hewlettpackardcompany': 'Hewlett-Packard',
'theandroidopensourceproject': 'Android Open Source Project',
'androidopensourceproject': 'Android Open Source Project',
'ibm': 'IBM',
'redhat': 'Red Hat',
'redhatinc': 'Red Hat',
'softwareinthepublicinterest': 'Software in the Public Interest',
'spiinc': 'Software in the Public Interest',
'suse': 'SuSE',
'suseinc': 'SuSE',
'sunmicrosystems': 'Sun Microsystems',
'sunmicrosystemsinc': 'Sun Microsystems',
'sunmicro': 'Sun Microsystems',
'thaiopensourcesoftwarecenter': 'Thai Open Source Software Center',
'apachefoundation': 'The Apache Software Foundation',
'apachegroup': 'The Apache Software Foundation',
'apache': 'The Apache Software Foundation',
'apachesoftwarefoundation': 'The Apache Software Foundation',
'theapachegroup': 'The Apache Software Foundation',
'eclipse': 'The Eclipse Foundation',
'eclipsefoundation': 'The Eclipse Foundation',
'regentsoftheuniversityofcalifornia': 'The Regents of the University of California',
'borland': 'Borland',
'borlandcorp': 'Borland',
'microsoft': 'Microsoft',
'microsoftcorp': 'Microsoft',
'microsoftinc': 'Microsoft',
'microsoftcorporation': 'Microsoft',
'google': 'Google',
'googlellc': 'Google',
'googleinc': 'Google',
'intel': 'Intel',
}
# Remove everything except letters and numbers
_keep_only_chars = re.compile('[_\\W]+', re.UNICODE).sub # NOQA
def keep_only_chars(s):
return _keep_only_chars('', s)
def canonical_holder(s):
"""
Return a canonical holder for string `s` or s.
"""
key = keep_only_chars(s).lower()
cano = COMMON_NAMES.get(key)
if TRACE_CANO:
logger_debug('cano: for s:', s, 'with key:', key, 'is cano:', cano)
s = cano or s
s = strip_suffixes(s)
return s
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/summarycode/copyright_tallies.py
| 0.444806 | 0.157363 |
copyright_tallies.py
|
pypi
|
from collections import Counter
import attr
from commoncode.cliutils import POST_SCAN_GROUP, PluggableCommandLineOption
from plugincode.post_scan import PostScanPlugin, post_scan_impl
from summarycode.utils import (get_resource_tallies, set_resource_tallies,
sorted_counter)
# Tracing flags
TRACE = False
TRACE_LIGHT = False
def logger_debug(*args):
pass
if TRACE or TRACE_LIGHT:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Create summarized scan data.
"""
@post_scan_impl
class Tallies(PostScanPlugin):
"""
Compute tallies for license, copyright and other scans at the codebase level
"""
run_order = 15
sort_order = 15
codebase_attributes = dict(tallies=attr.ib(default=attr.Factory(dict)))
options = [
PluggableCommandLineOption(('--tallies',),
is_flag=True, default=False,
help='Compute tallies for license, copyright and other scans at the codebase level.',
help_group=POST_SCAN_GROUP)
]
def is_enabled(self, tallies, **kwargs):
return tallies
def process_codebase(self, codebase, tallies, **kwargs):
if TRACE_LIGHT: logger_debug('Tallies:process_codebase')
tallies = compute_codebase_tallies(codebase, keep_details=False, **kwargs)
codebase.attributes.tallies.update(tallies)
@post_scan_impl
class TalliesWithDetails(PostScanPlugin):
"""
Compute tallies of different scan attributes of a scan at the codebase level and
keep file and directory details.
The scan attributes that are tallied are:
- detected_license_expression
- copyrights
- holders
- authors
- programming_language
- packages
"""
# mapping of tally data at the codebase level for the whole codebase
codebase_attributes = dict(tallies=attr.ib(default=attr.Factory(dict)))
# store tallies at the file and directory level in this attribute when
# keep details is True
resource_attributes = dict(tallies=attr.ib(default=attr.Factory(dict)))
run_order = 100
sort_order = 100
options = [
PluggableCommandLineOption(('--tallies-with-details',),
is_flag=True, default=False,
help='Compute tallies of license, copyright and other scans at the codebase level, '
'keeping intermediate details at the file and directory level.',
help_group=POST_SCAN_GROUP)
]
def is_enabled(self, tallies_with_details, **kwargs):
return tallies_with_details
def process_codebase(self, codebase, tallies_with_details, **kwargs):
tallies = compute_codebase_tallies(codebase, keep_details=True, **kwargs)
codebase.attributes.tallies.update(tallies)
def compute_codebase_tallies(codebase, keep_details, **kwargs):
"""
Compute tallies of a scan at the codebase level for available scans.
If `keep_details` is True, also keep file and directory details in the
`tallies` file attribute for every file and directory.
"""
from summarycode.copyright_tallies import (author_tallies,
copyright_tallies,
holder_tallies)
attrib_summarizers = [
('detected_license_expression', license_tallies),
('copyrights', copyright_tallies),
('holders', holder_tallies),
('authors', author_tallies),
('programming_language', language_tallies),
('packages', package_tallies),
]
# find which attributes are available for summarization by checking the root
# resource
root = codebase.root
summarizers = [s for a, s in attrib_summarizers if hasattr(root, a)]
if TRACE: logger_debug('compute_codebase_tallies with:', summarizers)
# collect and set resource-level summaries
for resource in codebase.walk(topdown=False):
children = resource.children(codebase)
for summarizer in summarizers:
_summary_data = summarizer(resource, children, keep_details=keep_details)
if TRACE: logger_debug('tallies for:', resource.path, 'after tallies:', summarizer, 'is:', _summary_data)
codebase.save_resource(resource)
# set the tallies from the root resource at the codebase level
if keep_details:
tallies = root.tallies
else:
tallies = root.extra_data.get('tallies', {})
if TRACE: logger_debug('codebase tallies:', tallies)
return tallies
def license_tallies(resource, children, keep_details=False):
"""
Populate a license_expressions list of mappings such as
{value: "expression", count: "count of occurences"}
sorted by decreasing count.
"""
LIC_EXP = 'detected_license_expression'
LIC_DET = 'license_detections'
LIC_CLUE = 'license_clues'
license_expressions = []
# Collect current data
detected_expressions = []
for detection in getattr(resource, LIC_DET, []):
detected_expressions.append(detection["license_expression"])
for match in getattr(resource, LIC_CLUE, []):
detected_expressions.append(match["license_expression"])
package_license_detections = []
PACKAGE_DATA = 'package_data'
package_data = getattr(resource, PACKAGE_DATA, [])
if package_data:
package_license_detections.extend(
[
detection
for detection in getattr(package_data, LIC_DET, [])
if detection
]
)
for detection in package_license_detections:
detected_expressions.append(detection["license_expression"])
if not detected_expressions and resource.is_file:
# also count files with no detection
license_expressions.append(None)
else:
license_expressions.extend(detected_expressions)
# Collect direct children expression tallies
for child in children:
child_tallies = get_resource_tallies(child, key=LIC_EXP, as_attribute=keep_details) or []
for child_tally in child_tallies:
# TODO: review this: this feels rather weird
child_sum_val = child_tally.get('value')
values = [child_sum_val] * child_tally['count']
license_expressions.extend(values)
# summarize proper
licenses_counter = tally_licenses(license_expressions)
tallied = sorted_counter(licenses_counter)
set_resource_tallies(resource, key=LIC_EXP, value=tallied, as_attribute=keep_details)
return tallied
def tally_licenses(license_expressions):
"""
Given a list of license expressions, return a mapping of {expression: count
of occurences}
"""
# TODO: we could normalize and/or sort each license_expression before
# summarization and consider other equivalence or containment checks
return Counter(license_expressions)
def language_tallies(resource, children, keep_details=False):
"""
Populate a programming_language tallies list of mappings such as
{value: "programming_language", count: "count of occurences"}
sorted by decreasing count.
"""
PROG_LANG = 'programming_language'
languages = []
prog_lang = getattr(resource, PROG_LANG , [])
if not prog_lang:
if resource.is_file:
# also count files with no detection
languages.append(None)
else:
languages.append(prog_lang)
# Collect direct children expression summaries
for child in children:
child_tallies = get_resource_tallies(child, key=PROG_LANG, as_attribute=keep_details) or []
for child_tally in child_tallies:
child_sum_val = child_tally.get('value')
if child_sum_val:
values = [child_sum_val] * child_tally['count']
languages.extend(values)
# summarize proper
languages_counter = tally_languages(languages)
tallied = sorted_counter(languages_counter)
set_resource_tallies(resource, key=PROG_LANG, value=tallied, as_attribute=keep_details)
return tallied
def tally_languages(languages):
"""
Given a list of languages, return a mapping of {language: count
of occurences}
"""
# TODO: consider aggregating related langauges (C/C++, etc)
return Counter(languages)
TALLYABLE_ATTRS = set([
'detected_license_expression',
'copyrights',
'holders',
'authors',
'programming_language',
# 'packages',
])
def tally_values(values, attribute):
"""
Given a list of `values` for a given `attribute`, return a mapping of
{value: count of occurences} using a tallier specific to the attribute.
"""
if attribute not in TALLYABLE_ATTRS:
return {}
from summarycode.copyright_tallies import tally_copyrights, tally_persons
value_talliers_by_attr = dict(
detected_license_expression=tally_licenses,
copyrights=tally_copyrights,
holders=tally_persons,
authors=tally_persons,
programming_language=tally_languages,
)
return value_talliers_by_attr[attribute](values)
@post_scan_impl
class KeyFilesTallies(PostScanPlugin):
"""
Compute tallies of a scan at the codebase level for only key files.
"""
run_order = 150
sort_order = 150
# mapping of tally data at the codebase level for key files
codebase_attributes = dict(tallies_of_key_files=attr.ib(default=attr.Factory(dict)))
options = [
PluggableCommandLineOption(('--tallies-key-files',),
is_flag=True, default=False,
help='Compute tallies for license, copyright and other scans for key, '
'top-level files. Key files are top-level codebase files such '
'as COPYING, README and package manifests as reported by the '
'--classify option "is_legal", "is_readme", "is_manifest" '
'and "is_top_level" flags.',
help_group=POST_SCAN_GROUP,
required_options=['classify', 'tallies']
)
]
def is_enabled(self, tallies_key_files, **kwargs):
return tallies_key_files
def process_codebase(self, codebase, tallies_key_files, **kwargs):
tally_codebase_key_files(codebase, **kwargs)
def tally_codebase_key_files(codebase, field='tallies', **kwargs):
"""
Summarize codebase key files.
"""
talliables = codebase.attributes.tallies.keys()
if TRACE: logger_debug('tallieables:', talliables)
# TODO: we cannot summarize packages with "key files" for now
talliables = [k for k in talliables if k in TALLYABLE_ATTRS]
# create one counter for each summarized attribute
talliable_values_by_key = dict([(key, []) for key in talliables])
# filter to get only key files
key_files = (res for res in codebase.walk(topdown=True)
if (res.is_file and res.is_top_level
and (res.is_readme or res.is_legal or res.is_manifest)))
for resource in key_files:
for key, values in talliable_values_by_key.items():
# note we assume things are stored as extra-data, not as direct
# Resource attributes
res_tallies = get_resource_tallies(resource, key=key, as_attribute=False) or []
for tally in res_tallies:
# each tally is a mapping with value/count: we transform back to values
tally_value = tally.get('value')
if tally_value:
values.extend([tally_value] * tally['count'])
tally_counters = []
for key, values in talliable_values_by_key.items():
if key not in TALLYABLE_ATTRS:
continue
tallied = tally_values(values, key)
tally_counters.append((key, tallied))
sorted_tallies = dict(
[(key, sorted_counter(counter)) for key, counter in tally_counters])
codebase.attributes.tallies_of_key_files = sorted_tallies
if TRACE: logger_debug('codebase tallies_of_key_files:', sorted_tallies)
@post_scan_impl
class FacetTallies(PostScanPlugin):
"""
Compute tallies for a scan at the codebase level, grouping by facets.
"""
run_order = 200
sort_order = 200
codebase_attributes = dict(tallies_by_facet=attr.ib(default=attr.Factory(list)))
options = [
PluggableCommandLineOption(('--tallies-by-facet',),
is_flag=True, default=False,
help='Compute tallies for license, copyright and other scans and group the '
'results by facet.',
help_group=POST_SCAN_GROUP,
required_options=['facet', 'tallies']
)
]
def is_enabled(self, tallies_by_facet, **kwargs):
return tallies_by_facet
def process_codebase(self, codebase, tallies_by_facet, **kwargs):
if TRACE_LIGHT: logger_debug('FacetTallies:process_codebase')
tally_codebase_by_facet(codebase, **kwargs)
def tally_codebase_by_facet(codebase, **kwargs):
"""
Summarize codebase by facte.
"""
from summarycode import facet as facet_module
talliable = codebase.attributes.tallies.keys()
if TRACE:
logger_debug('tally_codebase_by_facet for attributes:', talliable)
# create one group of by-facet values lists for each summarized attribute
talliable_values_by_key_by_facet = dict([
(facet, dict([(key, []) for key in talliable]))
for facet in facet_module.FACETS
])
for resource in codebase.walk(topdown=True):
if not resource.is_file:
continue
for facet in resource.facets:
# note: this will fail loudly if the facet is not a known one
values_by_attribute = talliable_values_by_key_by_facet[facet]
for key, values in values_by_attribute.items():
# note we assume things are stored as extra-data, not as direct
# Resource attributes
res_tallies = get_resource_tallies(resource, key=key, as_attribute=False) or []
for tally in res_tallies:
# each tally is a mapping with value/count: we transform back to discrete values
tally_value = tally.get('value')
if tally_value:
values.extend([tally_value] * tally['count'])
final_tallies = []
for facet, talliable_values_by_key in talliable_values_by_key_by_facet.items():
tally_counters = (
(key, tally_values(values, key))
for key, values in talliable_values_by_key.items()
)
sorted_tallies = dict(
[(key, sorted_counter(counter)) for key, counter in tally_counters])
facet_tally = dict(facet=facet)
facet_tally['tallies'] = sorted_tallies
final_tallies.append(facet_tally)
codebase.attributes.tallies_by_facet.extend(final_tallies)
if TRACE: logger_debug('codebase tallies_by_facet:', final_tallies)
def add_files(packages, resource):
"""
Update in-place every package mapping in the `packages` list by updating or
creating the the "files" attribute from the `resource`. Yield back the
packages.
"""
for package in packages:
files = package['files'] = package.get('files') or []
fil = resource.to_dict(skinny=True)
if fil not in files:
files.append(fil)
yield package
def package_tallies(resource, children, keep_details=False):
"""
Populate a packages tally list of packages mappings.
Note: `keep_details` is never used, as we are not keeping details of
packages as this has no value.
"""
packages = []
# Collect current data
current_packages = getattr(resource, 'packages') or []
if TRACE_LIGHT and current_packages:
from packagedcode.models import Package
packs = [Package(**p) for p in current_packages]
logger_debug('package_tallier: for:', resource,
'current_packages are:', packs)
current_packages = add_files(current_packages, resource)
packages.extend(current_packages)
if TRACE_LIGHT and packages:
logger_debug()
from packagedcode.models import Package # NOQA
packs = [Package(**p) for p in packages]
logger_debug('package_tallier: for:', resource,
'packages are:', packs)
# Collect direct children packages tallies
for child in children:
child_tallies = get_resource_tallies(child, key='packages', as_attribute=False) or []
packages.extend(child_tallies)
# summarize proper
set_resource_tallies(resource, key='packages', value=packages, as_attribute=False)
return packages
|
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/summarycode/tallies.py
| 0.550366 | 0.161982 |
tallies.py
|
pypi
|
from django import forms
from django.apps import apps
from django.core.exceptions import ValidationError
from taggit.forms import TagField
from taggit.forms import TagWidget
from scanpipe.models import Project
from scanpipe.pipes.fetch import fetch_urls
scanpipe_app = apps.get_app_config("scanpipe")
class MultipleFileInput(forms.ClearableFileInput):
allow_multiple_selected = True
class MultipleFileField(forms.FileField):
def __init__(self, *args, **kwargs):
kwargs.setdefault("widget", MultipleFileInput(attrs={"class": "file-input"}))
super().__init__(*args, **kwargs)
def clean(self, data, initial=None):
single_file_clean = super().clean
if isinstance(data, (list, tuple)):
result = [single_file_clean(entry, initial) for entry in data]
else:
result = single_file_clean(data, initial)
return result
class InputsBaseForm(forms.Form):
input_files = MultipleFileField(required=False)
input_urls = forms.CharField(
label="Download URLs",
required=False,
help_text="Provide one or more URLs to download, one per line.",
widget=forms.Textarea(
attrs={
"class": "textarea is-dynamic",
"rows": 2,
"placeholder": (
"https://domain.com/archive.zip\n"
"docker://docker-reference (e.g.: docker://postgres:13)"
),
},
),
)
class Media:
js = ("add-inputs.js",)
def clean_input_urls(self):
"""
Fetch the `input_urls` and sets the `downloads` objects in the cleaned_data.
A validation error is raised, if at least one URL can't be fetched.
"""
input_urls = self.cleaned_data.get("input_urls", [])
self.cleaned_data["downloads"], errors = fetch_urls(input_urls)
if errors:
raise ValidationError("Could not fetch: " + "\n".join(errors))
return input_urls
def handle_inputs(self, project):
input_files = self.files.getlist("input_files")
downloads = self.cleaned_data.get("downloads")
if input_files:
project.add_uploads(input_files)
if downloads:
project.add_downloads(downloads)
class PipelineBaseForm(forms.Form):
pipeline = forms.ChoiceField(
choices=scanpipe_app.get_pipeline_choices(),
required=False,
)
execute_now = forms.BooleanField(
label="Execute pipeline now",
initial=True,
required=False,
)
def handle_pipeline(self, project):
pipeline = self.cleaned_data["pipeline"]
execute_now = self.cleaned_data["execute_now"]
if pipeline:
project.add_pipeline(pipeline, execute_now)
class ProjectForm(InputsBaseForm, PipelineBaseForm, forms.ModelForm):
class Meta:
model = Project
fields = [
"name",
"input_files",
"input_urls",
"pipeline",
"execute_now",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
name_field = self.fields["name"]
name_field.widget.attrs["class"] = "input"
name_field.widget.attrs["autofocus"] = True
name_field.help_text = "The unique name of your project."
def clean_name(self):
return " ".join(self.cleaned_data["name"].split())
def save(self, *args, **kwargs):
project = super().save(*args, **kwargs)
self.handle_inputs(project)
self.handle_pipeline(project)
return project
class AddInputsForm(InputsBaseForm, forms.Form):
def save(self, project):
self.handle_inputs(project)
return project
class AddPipelineForm(PipelineBaseForm):
pipeline = forms.ChoiceField(
choices=[
(name, pipeline_class.get_summary())
for name, pipeline_class in scanpipe_app.pipelines.items()
],
widget=forms.RadioSelect(),
required=True,
)
def save(self, project):
self.handle_pipeline(project)
return project
class AddLabelsForm(forms.Form):
labels = TagField(
label="Add labels to this project:",
widget=TagWidget(
attrs={"class": "input", "placeholder": "Comma-separated list of labels"}
),
)
def save(self, project):
project.labels.add(*self.cleaned_data["labels"])
return project
class ArchiveProjectForm(forms.Form):
remove_input = forms.BooleanField(
label="Remove inputs",
initial=True,
required=False,
)
remove_codebase = forms.BooleanField(
label="Remove codebase",
initial=True,
required=False,
)
remove_output = forms.BooleanField(
label="Remove outputs",
initial=False,
required=False,
)
class ListTextarea(forms.CharField):
"""
A Django form field that displays as a textarea and converts each line of input
into a list of items.
This field extends the `CharField` and uses the `Textarea` widget to display the
input as a textarea.
Each line of the textarea input is split into items, removing leading/trailing
whitespace and empty lines.
The resulting list of items is then stored as the field value.
"""
widget = forms.Textarea
def to_python(self, value):
"""Split the textarea input into lines and remove empty lines."""
if value:
return [line.strip() for line in value.splitlines() if line.strip()]
def prepare_value(self, value):
"""Join the list items into a string with newlines."""
if value is not None:
value = "\n".join(value)
return value
class ProjectSettingsForm(forms.ModelForm):
settings_fields = [
"extract_recursively",
"ignored_patterns",
"scancode_license_score",
"attribution_template",
]
extract_recursively = forms.BooleanField(
label="Extract recursively",
required=False,
initial=True,
help_text="Extract nested archives-in-archives recursively",
widget=forms.CheckboxInput(attrs={"class": "checkbox mr-1"}),
)
ignored_patterns = ListTextarea(
label="Ignored patterns",
required=False,
help_text="Provide one or more path patterns to be ignored, one per line.",
widget=forms.Textarea(
attrs={
"class": "textarea is-dynamic",
"rows": 3,
"placeholder": "*.xml\ntests/*\n*docs/*.rst",
},
),
)
scancode_license_score = forms.IntegerField(
label="License score",
min_value=0,
max_value=100,
required=False,
help_text=(
"Do not return license matches with a score lower than this score. "
"A number between 0 and 100."
),
widget=forms.NumberInput(attrs={"class": "input"}),
)
attribution_template = forms.CharField(
label="Attribution template",
required=False,
help_text="Custom attribution template.",
widget=forms.Textarea(attrs={"class": "textarea is-dynamic", "rows": 3}),
)
class Meta:
model = Project
fields = [
"name",
"notes",
]
widgets = {
"name": forms.TextInput(attrs={"class": "input"}),
"notes": forms.Textarea(attrs={"rows": 3, "class": "textarea is-dynamic"}),
}
def __init__(self, *args, **kwargs):
"""Load initial values from Project ``settings`` field."""
super().__init__(*args, **kwargs)
for field_name in self.settings_fields:
field = self.fields[field_name]
# Do not override the field ``initial`` if the key is not in the settings
if field_name in self.instance.settings:
field.initial = self.instance.settings.get(field_name)
def save(self, *args, **kwargs):
project = super().save(*args, **kwargs)
self.update_project_settings(project)
return project
def update_project_settings(self, project):
"""Update Project ``settings`` field values from form data."""
config = {
field_name: self.cleaned_data[field_name]
for field_name in self.settings_fields
}
project.settings.update(config)
project.save(update_fields=["settings"])
class ProjectCloneForm(forms.Form):
clone_name = forms.CharField(widget=forms.TextInput(attrs={"class": "input"}))
copy_inputs = forms.BooleanField(
initial=True,
required=False,
help_text="Input files located in the input/ work directory will be copied.",
widget=forms.CheckboxInput(attrs={"class": "checkbox mr-1"}),
)
copy_pipelines = forms.BooleanField(
initial=True,
required=False,
help_text="All pipelines assigned to the original project will be copied over.",
widget=forms.CheckboxInput(attrs={"class": "checkbox mr-1"}),
)
copy_settings = forms.BooleanField(
initial=True,
required=False,
help_text="All project settings will be copied.",
widget=forms.CheckboxInput(attrs={"class": "checkbox mr-1"}),
)
execute_now = forms.BooleanField(
label="Execute copied pipeline(s) now",
initial=False,
required=False,
help_text="Copied pipelines will be directly executed.",
)
def __init__(self, instance, *args, **kwargs):
self.project = instance
super().__init__(*args, **kwargs)
self.fields["clone_name"].initial = f"{self.project.name} clone"
def clean_clone_name(self):
clone_name = self.cleaned_data.get("clone_name")
if Project.objects.filter(name=clone_name).exists():
raise ValidationError("Project with this name already exists.")
return clone_name
def save(self, *args, **kwargs):
return self.project.clone(**self.cleaned_data)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/forms.py
| 0.695235 | 0.178884 |
forms.py
|
pypi
|
from django.db import migrations, models
import django.db.models.deletion
import scanpipe.models
class Migration(migrations.Migration):
dependencies = [
("scanpipe", "0021_codebaseresource_package_data"),
]
operations = [
migrations.RenameField(
model_name='discoveredpackage',
old_name='dependencies',
new_name='dependencies_data',
),
migrations.CreateModel(
name="DiscoveredDependency",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"type",
models.CharField(
blank=True,
help_text="A short code to identify the type of this package. For example: gem for a Rubygem, docker for a container, pypi for a Python Wheel or Egg, maven for a Maven Jar, deb for a Debian package, etc.",
max_length=16,
),
),
(
"namespace",
models.CharField(
blank=True,
help_text="Package name prefix, such as Maven groupid, Docker image owner, GitHub user or organization, etc.",
max_length=255,
),
),
(
"name",
models.CharField(
blank=True, help_text="Name of the package.", max_length=100
),
),
(
"version",
models.CharField(
blank=True, help_text="Version of the package.", max_length=100
),
),
(
"qualifiers",
models.CharField(
blank=True,
help_text="Extra qualifying data for a package such as the name of an OS, architecture, distro, etc.",
max_length=1024,
),
),
(
"subpath",
models.CharField(
blank=True,
help_text="Extra subpath within a package, relative to the package root.",
max_length=200,
),
),
(
"dependency_uid",
models.CharField(
help_text="The unique identifier of this dependency.",
max_length=1024,
),
),
(
"extracted_requirement",
models.CharField(
blank=True,
help_text="The version requirements of this dependency.",
max_length=256,
),
),
(
"scope",
models.CharField(
blank=True,
help_text="The scope of this dependency, how it is used in a project.",
max_length=64,
),
),
(
"datasource_id",
models.CharField(
blank=True,
help_text="The identifier for the datafile handler used to obtain this dependency.",
max_length=64,
),
),
("is_runtime", models.BooleanField(default=False)),
("is_optional", models.BooleanField(default=False)),
("is_resolved", models.BooleanField(default=False)),
(
"datafile_resource",
models.ForeignKey(
blank=True,
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="dependencies",
to="scanpipe.codebaseresource",
),
),
(
"for_package",
models.ForeignKey(
blank=True,
editable=False,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="dependencies",
to="scanpipe.discoveredpackage",
),
),
(
"project",
models.ForeignKey(
editable=False,
on_delete=django.db.models.deletion.CASCADE,
related_name="discovereddependencies",
to="scanpipe.project",
),
),
],
options={
"verbose_name": "discovered dependency",
"verbose_name_plural": "discovered dependencies",
"ordering": [
"-is_runtime",
"-is_resolved",
"is_optional",
"dependency_uid",
"for_package",
"datafile_resource",
"datasource_id",
],
},
bases=(
scanpipe.models.SaveProjectMessageMixin,
scanpipe.models.UpdateFromDataMixin,
models.Model,
),
),
migrations.AddConstraint(
model_name="discovereddependency",
constraint=models.UniqueConstraint(
condition=models.Q(("dependency_uid", ""), _negated=True),
fields=("project", "dependency_uid"),
name="scanpipe_discovereddependency_unique_dependency_uid_within_project",
),
),
]
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/migrations/0022_create_discovereddependencies_model.py
| 0.58059 | 0.200245 |
0022_create_discovereddependencies_model.py
|
pypi
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("scanpipe", "0024_remove_discoveredpackage_dependencies_data"),
]
operations = [
migrations.RemoveField(
model_name="discoveredpackage",
name="last_modified_date",
),
migrations.AddField(
model_name="discoveredpackage",
name="api_data_url",
field=models.CharField(
blank=True,
help_text="API URL to obtain structured data for this package such as the URL to a JSON or XML api its package repository.",
max_length=1024,
verbose_name="API data URL",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="datasource_id",
field=models.CharField(
blank=True,
help_text="The identifier for the datafile handler used to obtain this package.",
max_length=64,
),
),
migrations.AddField(
model_name="discoveredpackage",
name="file_references",
field=models.JSONField(
blank=True,
default=list,
help_text="List of file paths and details for files referenced in a package manifest. These may not actually exist on the filesystem. The exact semantics and base of these paths is specific to a package type or datafile format.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="parties",
field=models.JSONField(
blank=True,
default=list,
help_text="A list of parties such as a person, project or organization.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="repository_download_url",
field=models.CharField(
blank=True,
help_text="Download URL to download the actual archive of code of this package in its package repository. This may be different from the actual download URL.",
max_length=1024,
verbose_name="Repository download URL",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="repository_homepage_url",
field=models.CharField(
blank=True,
help_text="URL to the page for this package in its package repository. This is typically different from the package homepage URL proper.",
max_length=1024,
verbose_name="Repository homepage URL",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="sha256",
field=models.CharField(
blank=True,
help_text="SHA256 checksum hex-encoded, as in sha256sum.",
max_length=64,
verbose_name="SHA256",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="sha512",
field=models.CharField(
blank=True,
help_text="SHA512 checksum hex-encoded, as in sha512sum.",
max_length=128,
verbose_name="SHA512",
),
),
migrations.AlterField(
model_name="codebaseresource",
name="md5",
field=models.CharField(
blank=True,
help_text="MD5 checksum hex-encoded, as in md5sum.",
max_length=32,
verbose_name="MD5",
),
),
migrations.AlterField(
model_name="codebaseresource",
name="sha1",
field=models.CharField(
blank=True,
help_text="SHA1 checksum hex-encoded, as in sha1sum.",
max_length=40,
verbose_name="SHA1",
),
),
migrations.AlterField(
model_name="codebaseresource",
name="sha256",
field=models.CharField(
blank=True,
help_text="SHA256 checksum hex-encoded, as in sha256sum.",
max_length=64,
verbose_name="SHA256",
),
),
migrations.AlterField(
model_name="codebaseresource",
name="sha512",
field=models.CharField(
blank=True,
help_text="SHA512 checksum hex-encoded, as in sha512sum.",
max_length=128,
verbose_name="SHA512",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="bug_tracking_url",
field=models.CharField(
blank=True,
help_text="URL to the issue or bug tracker for this package.",
max_length=1024,
verbose_name="Bug tracking URL",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="code_view_url",
field=models.CharField(
blank=True,
help_text="a URL where the code can be browsed online.",
max_length=1024,
verbose_name="Code view URL",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="download_url",
field=models.CharField(
blank=True,
help_text="A direct download URL.",
max_length=2048,
verbose_name="Download URL",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="filename",
field=models.CharField(
blank=True,
help_text="File name of a Resource sometimes part of the URI properand sometimes only available through an HTTP header.",
max_length=255,
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="homepage_url",
field=models.CharField(
blank=True,
help_text="URL to the homepage for this package.",
max_length=1024,
verbose_name="Homepage URL",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="md5",
field=models.CharField(
blank=True,
help_text="MD5 checksum hex-encoded, as in md5sum.",
max_length=32,
verbose_name="MD5",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="primary_language",
field=models.CharField(
blank=True, help_text="Primary programming language.", max_length=50
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="release_date",
field=models.DateField(
blank=True,
help_text="The date that the package file was created, or when it was posted to its original download source.",
null=True,
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="sha1",
field=models.CharField(
blank=True,
help_text="SHA1 checksum hex-encoded, as in sha1sum.",
max_length=40,
verbose_name="SHA1",
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="size",
field=models.BigIntegerField(
blank=True, help_text="Size in bytes.", null=True
),
),
migrations.AlterField(
model_name="discoveredpackage",
name="vcs_url",
field=models.CharField(
blank=True,
help_text='A URL to the VCS repository in the SPDX form of: "git", "svn", "hg", "bzr", "cvs", https://github.com/nexb/scancode-toolkit.git@405aaa4b3 See SPDX specification "Package Download Location" at https://spdx.org/spdx-specification-21-web-version#h.49x2ik5',
max_length=1024,
verbose_name="VCS URL",
),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(
fields=["filename"], name="scanpipe_di_filenam_1e940b_idx"
),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(
fields=["primary_language"], name="scanpipe_di_primary_507471_idx"
),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(fields=["size"], name="scanpipe_di_size_ddec1a_idx"),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(fields=["md5"], name="scanpipe_di_md5_dc8dd2_idx"),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(fields=["sha1"], name="scanpipe_di_sha1_0e1e43_idx"),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(fields=["sha256"], name="scanpipe_di_sha256_cefc41_idx"),
),
migrations.AddIndex(
model_name="discoveredpackage",
index=models.Index(fields=["sha512"], name="scanpipe_di_sha512_a6344e_idx"),
),
]
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/migrations/0025_remove_discoveredpackage_last_modified_date_and_more.py
| 0.639398 | 0.201499 |
0025_remove_discoveredpackage_last_modified_date_and_more.py
|
pypi
|
from django.db import migrations, models
import django.db.models.deletion
import scanpipe.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CodebaseResource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('path', models.CharField(help_text='The full path value of a resource (file or directory) in the archive it is from.', max_length=2000)),
('size', models.BigIntegerField(blank=True, help_text='Size in bytes.', null=True)),
('sha1', models.CharField(blank=True, help_text='SHA1 checksum hex-encoded, as in sha1sum.', max_length=40)),
('md5', models.CharField(blank=True, help_text='MD5 checksum hex-encoded, as in md5sum.', max_length=32)),
('sha256', models.CharField(blank=True, help_text='SHA256 checksum hex-encoded, as in sha256sum.', max_length=64)),
('sha512', models.CharField(blank=True, help_text='SHA512 checksum hex-encoded, as in sha512sum.', max_length=128)),
('copyrights', models.JSONField(blank=True, default=list, help_text='List of detected copyright statements (and related detection details).')),
('holders', models.JSONField(blank=True, default=list, help_text='List of detected copyright holders (and related detection details).')),
('authors', models.JSONField(blank=True, default=list, help_text='List of detected authors (and related detection details).')),
('licenses', models.JSONField(blank=True, default=list, help_text='List of license detection details.')),
('license_expressions', models.JSONField(blank=True, default=list, help_text='List of detected license expressions.')),
('emails', models.JSONField(blank=True, default=list, help_text='List of detected emails (and related detection details).')),
('urls', models.JSONField(blank=True, default=list, help_text='List of detected URLs (and related detection details).')),
('rootfs_path', models.CharField(blank=True, help_text='Path relative to some root filesystem root directory. Useful when working on disk images, docker images, and VM images.Eg.: "/usr/bin/bash" for a path of "tarball-extract/rootfs/usr/bin/bash"', max_length=2000)),
('status', models.CharField(blank=True, help_text='Analysis status for this resource.', max_length=30)),
('type', models.CharField(choices=[('file', 'File'), ('directory', 'Directory'), ('symlink', 'Symlink')], help_text='Type of this resource as one of: file, directory, symlink', max_length=10)),
('extra_data', models.JSONField(blank=True, default=dict, help_text='Optional mapping of extra data key/values.')),
('name', models.CharField(blank=True, help_text='File or directory name of this resource.', max_length=255)),
('extension', models.CharField(blank=True, help_text='File extension for this resource (directories do not have an extension).', max_length=100)),
('programming_language', models.CharField(blank=True, help_text='Programming language of this resource if this is a code file.', max_length=50)),
('mime_type', models.CharField(blank=True, help_text='MIME type (aka. media type) for this resource. See https://en.wikipedia.org/wiki/Media_type', max_length=100)),
('file_type', models.CharField(blank=True, help_text='Descriptive file type for this resource.', max_length=1024)),
],
options={
'ordering': ('project', 'path'),
},
bases=(scanpipe.models.SaveProjectMessageMixin, models.Model),
),
migrations.CreateModel(
name='Project',
fields=[
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='UUID')),
('created_date', models.DateTimeField(auto_now_add=True, db_index=True, help_text='Creation date for this project.')),
('name', models.CharField(db_index=True, help_text='Name for this project.', max_length=100, unique=True)),
('work_directory', models.CharField(editable=False, help_text='Project work directory location.', max_length=2048)),
('extra_data', models.JSONField(default=dict, editable=False)),
],
options={
'ordering': ['-created_date'],
},
),
migrations.CreateModel(
name='Run',
fields=[
('task_id', models.UUIDField(blank=True, editable=False, null=True)),
('task_start_date', models.DateTimeField(blank=True, editable=False, null=True)),
('task_end_date', models.DateTimeField(blank=True, editable=False, null=True)),
('task_exitcode', models.IntegerField(blank=True, editable=False, null=True)),
('task_output', models.TextField(blank=True, editable=False)),
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='UUID')),
('pipeline', models.CharField(max_length=1024)),
('created_date', models.DateTimeField(auto_now_add=True, db_index=True)),
('description', models.TextField(blank=True)),
('project', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='runs', to='scanpipe.project')),
],
options={
'ordering': ['created_date'],
},
),
migrations.CreateModel(
name='ProjectError',
fields=[
('uuid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='UUID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('model', models.CharField(help_text='Name of the model class.', max_length=100)),
('details', models.JSONField(blank=True, default=dict, help_text='Data that caused the error.')),
('message', models.TextField(blank=True, help_text='Error message.')),
('traceback', models.TextField(blank=True, help_text='Exception traceback.')),
('project', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='projecterrors', to='scanpipe.project')),
],
options={
'ordering': ['created_date'],
},
),
migrations.CreateModel(
name='DiscoveredPackage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(blank=True, help_text='A short code to identify the type of this package. For example: gem for a Rubygem, docker for a container, pypi for a Python Wheel or Egg, maven for a Maven Jar, deb for a Debian package, etc.', max_length=16)),
('namespace', models.CharField(blank=True, help_text='Package name prefix, such as Maven groupid, Docker image owner, GitHub user or organization, etc.', max_length=255)),
('name', models.CharField(blank=True, help_text='Name of the package.', max_length=100)),
('version', models.CharField(blank=True, help_text='Version of the package.', max_length=100)),
('qualifiers', models.CharField(blank=True, help_text='Extra qualifying data for a package such as the name of an OS, architecture, distro, etc.', max_length=1024)),
('subpath', models.CharField(blank=True, help_text='Extra subpath within a package, relative to the package root.', max_length=200)),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True, verbose_name='UUID')),
('last_modified_date', models.DateTimeField(blank=True, db_index=True, help_text='Timestamp set when a Package is created or modified', null=True)),
('filename', models.CharField(blank=True, db_index=True, help_text='File name of a Resource sometimes part of the URI properand sometimes only available through an HTTP header.', max_length=255)),
('primary_language', models.CharField(blank=True, help_text='Primary programming language', max_length=50)),
('description', models.TextField(blank=True, help_text='Description for this package. By convention the first line should be a summary when available.')),
('release_date', models.DateField(blank=True, db_index=True, help_text='The date that the package file was created, or when it was posted to its original download source.', null=True)),
('homepage_url', models.CharField(blank=True, help_text='URL to the homepage for this package.', max_length=1024)),
('download_url', models.CharField(blank=True, help_text='A direct download URL.', max_length=2048)),
('size', models.BigIntegerField(blank=True, db_index=True, help_text='Size in bytes.', null=True)),
('sha1', models.CharField(blank=True, db_index=True, help_text='SHA1 checksum hex-encoded, as in sha1sum.', max_length=40, verbose_name='download SHA1')),
('md5', models.CharField(blank=True, db_index=True, help_text='MD5 checksum hex-encoded, as in md5sum.', max_length=32, verbose_name='download MD5')),
('bug_tracking_url', models.CharField(blank=True, help_text='URL to the issue or bug tracker for this package', max_length=1024)),
('code_view_url', models.CharField(blank=True, help_text='a URL where the code can be browsed online', max_length=1024)),
('vcs_url', models.CharField(blank=True, help_text='a URL to the VCS repository in the SPDX form of: "git", "svn", "hg", "bzr", "cvs", https://github.com/nexb/scancode-toolkit.git@405aaa4b3 See SPDX specification "Package Download Location" at https://spdx.org/spdx-specification-21-web-version#h.49x2ik5 ', max_length=1024)),
('copyright', models.TextField(blank=True, help_text='Copyright statements for this package. Typically one per line.')),
('license_expression', models.TextField(blank=True, help_text='The normalized license expression for this package as derived from its declared license.')),
('declared_license', models.TextField(blank=True, help_text='The declared license mention or tag or text as found in a package manifest.')),
('notice_text', models.TextField(blank=True, help_text='A notice text for this package.')),
('manifest_path', models.CharField(blank=True, help_text='A relative path to the manifest file if any, such as a Maven .pom or a npm package.json.', max_length=1024)),
('contains_source_code', models.BooleanField(blank=True, null=True)),
('missing_resources', models.JSONField(blank=True, default=list)),
('modified_resources', models.JSONField(blank=True, default=list)),
('keywords', models.JSONField(blank=True, default=list)),
('source_packages', models.JSONField(blank=True, default=list)),
('codebase_resources', models.ManyToManyField(related_name='discovered_packages', to='scanpipe.CodebaseResource')),
('project', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='discoveredpackages', to='scanpipe.project')),
],
options={
'ordering': ['uuid'],
},
bases=(scanpipe.models.SaveProjectMessageMixin, models.Model),
),
migrations.AddField(
model_name='codebaseresource',
name='project',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='codebaseresources', to='scanpipe.project'),
),
migrations.AlterUniqueTogether(
name='codebaseresource',
unique_together={('project', 'path')},
),
]
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/migrations/0001_initial.py
| 0.585338 | 0.232071 |
0001_initial.py
|
pypi
|
from django.db import migrations
from django.db.models import Q
from django.conf import settings
def compute_package_declared_license_expression_spdx(apps, schema_editor):
"""
Compute DiscoveredPackage `declared_license_expression_spdx`, when missing,
from `declared_license_expression`, when available.
"""
from licensedcode.cache import build_spdx_license_expression
if settings.IS_TESTS:
return
DiscoveredPackage = apps.get_model("scanpipe", "DiscoveredPackage")
queryset = DiscoveredPackage.objects.filter(
~Q(declared_license_expression="") & Q(declared_license_expression_spdx="")
).only("declared_license_expression")
object_count = queryset.count()
print(f"\nCompute declared_license_expression_spdx for {object_count:,} packages.")
chunk_size = 2000
iterator = queryset.iterator(chunk_size=chunk_size)
unsaved_objects = []
for index, package in enumerate(iterator, start=1):
if spdx := build_spdx_license_expression(package.declared_license_expression):
package.declared_license_expression_spdx = spdx
unsaved_objects.append(package)
if not (index % chunk_size) and unsaved_objects:
print(f" {index:,} / {object_count:,} computed")
print("Updating DB objects...")
DiscoveredPackage.objects.bulk_update(
objs=unsaved_objects,
fields=["declared_license_expression_spdx"],
batch_size=1000,
)
def compute_resource_detected_license_expression(apps, schema_editor):
"""
Compute CodebaseResource `detected_license_expression` and
`detected_license_expression_spdx` from old `license_expressions` field.
"""
from license_expression import combine_expressions
from licensedcode.cache import build_spdx_license_expression
if settings.IS_TESTS:
return
CodebaseResource = apps.get_model("scanpipe", "CodebaseResource")
ProjectError = apps.get_model("scanpipe", "ProjectError")
queryset = CodebaseResource.objects.filter(~Q(license_expressions=[])).only(
"license_expressions"
)
object_count = queryset.count()
print(f"\nCompute detected_license_expression for {object_count:,} resources.")
chunk_size = 2000
iterator = queryset.iterator(chunk_size=chunk_size)
unsaved_objects = []
for index, resource in enumerate(iterator, start=1):
combined_expression = str(combine_expressions(resource.license_expressions))
# gpl-2.0 OR broadcom-linking-unmodified OR proprietary-license
# build_spdx_license_expression("broadcom-linking-unmodified")
# AttributeError: 'LicenseSymbol' object has no attribute 'wrapped'
try:
license_expression_spdx = build_spdx_license_expression(combined_expression)
except Exception as error:
ProjectError.objects.create(
project=resource.project,
message=str(error),
model=resource.__class__,
details={"combined_expression": combined_expression}
)
continue
resource.detected_license_expression = combined_expression
resource.detected_license_expression_spdx = license_expression_spdx
unsaved_objects.append(resource)
if not (index % chunk_size) and unsaved_objects:
print(f" {index:,} / {object_count:,} computed")
print("Updating DB objects...")
CodebaseResource.objects.bulk_update(
objs=unsaved_objects,
fields=[
"detected_license_expression",
"detected_license_expression_spdx",
],
batch_size=1000,
)
def _convert_matches_to_detections(license_matches):
"""
Return a list of scancode v32 LicenseDetection mappings from provided
``license_matches``: a list of the scancode v31 LicenseMatch mappings.
"""
from license_expression import combine_expressions
from licensedcode.detection import get_uuid_on_content
from commoncode.text import python_safe_name
match_attributes = ["score", "start_line", "end_line", "matched_text"]
rule_attributes = [
"matched_length",
"match_coverage",
"matcher",
"rule_relevance",
]
license_detection = {}
detection_matches = []
for match in license_matches:
detection_match = {}
for attribute in match_attributes:
detection_match[attribute] = match[attribute]
for attribute in rule_attributes:
detection_match[attribute] = match["matched_rule"][attribute]
detection_match["rule_identifier"] = match["matched_rule"]["identifier"]
detection_match["license_expression"] = match["matched_rule"][
"license_expression"
]
detection_match["rule_url"] = None
detection_matches.append(detection_match)
license_expressions = [match["license_expression"] for match in detection_matches]
hashable_details = tuple(
[
(match["score"], match["rule_identifier"], match["matched_text"])
for match in detection_matches
]
)
uuid = get_uuid_on_content(hashable_details)
license_detection["matches"] = detection_matches
license_detection["license_expression"] = str(
combine_expressions(license_expressions)
)
license_detection["identifier"] = "{}-{}".format(
python_safe_name(license_detection["license_expression"]), uuid
)
return [license_detection]
def compute_resource_license_detections(apps, schema_editor):
"""Compute CodebaseResource `license_detections` from old `licenses` field."""
if settings.IS_TESTS:
return
CodebaseResource = apps.get_model("scanpipe", "CodebaseResource")
queryset = CodebaseResource.objects.filter(~Q(licenses=[])).only("licenses")
object_count = queryset.count()
print(f"\nCompute license_detections for {object_count:,} resources.")
chunk_size = 2000
iterator = queryset.iterator(chunk_size=chunk_size)
unsaved_objects = []
for index, resource in enumerate(iterator, start=1):
detections = _convert_matches_to_detections(resource.licenses)
resource.license_detections = detections
unsaved_objects.append(resource)
if not (index % chunk_size):
print(f" {index:,} / {object_count:,} computed")
print("Updating DB objects...")
# Keeping the batch_size small as the `license_detections` content is often large,
# and it may raise `django.db.utils.OperationalError: out of memory`
CodebaseResource.objects.bulk_update(
objs=unsaved_objects,
fields=["license_detections"],
batch_size=50,
)
class Migration(migrations.Migration):
dependencies = [
("scanpipe", "0030_scancode_toolkit_v32_model_updates"),
]
operations = [
migrations.RunPython(
compute_package_declared_license_expression_spdx,
reverse_code=migrations.RunPython.noop,
),
migrations.RunPython(
compute_resource_detected_license_expression,
reverse_code=migrations.RunPython.noop,
),
migrations.RunPython(
compute_resource_license_detections,
reverse_code=migrations.RunPython.noop,
),
]
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/migrations/0031_scancode_toolkit_v32_data_updates.py
| 0.739046 | 0.228156 |
0031_scancode_toolkit_v32_data_updates.py
|
pypi
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("scanpipe", "0029_codebaseresource_scanpipe_co_type_ea1dd7_idx_and_more"),
]
operations = [
migrations.RemoveIndex(
model_name="discoveredpackage",
name="scanpipe_di_license_e8ce32_idx",
),
migrations.RenameField(
model_name="discoveredpackage",
old_name="license_expression",
new_name="declared_license_expression",
),
migrations.AlterField(
model_name="discoveredpackage",
name="declared_license_expression",
field=models.TextField(
blank=True,
help_text="The license expression for this package typically derived from its extracted_license_statement or from some other type-specific routine or convention.",
),
),
migrations.RenameField(
model_name="discoveredpackage",
old_name="declared_license",
new_name="extracted_license_statement",
),
migrations.AlterField(
model_name="discoveredpackage",
name="extracted_license_statement",
field=models.TextField(
blank=True,
help_text="The license statement mention, tag or text as found in a package manifest and extracted. This can be a string, a list or dict of strings possibly nested, as found originally in the manifest.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="declared_license_expression_spdx",
field=models.TextField(
blank=True,
help_text="The SPDX license expression for this package converted from its declared_license_expression.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="holder",
field=models.TextField(
blank=True,
help_text="Holders for this package. Typically one per line.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="license_detections",
field=models.JSONField(
blank=True,
default=list,
help_text="A list of LicenseDetection mappings typically derived from its extracted_license_statement or from some other type-specific routine or convention.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="other_license_detections",
field=models.JSONField(
blank=True,
default=list,
help_text="A list of LicenseDetection mappings which is different from the declared_license_expression, (i.e. not the primary license) These are detections for the detection for the license expressions in other_license_expression. ",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="other_license_expression",
field=models.TextField(
blank=True,
help_text="The license expression for this package which is different from the declared_license_expression, (i.e. not the primary license) routine or convention.",
),
),
migrations.AddField(
model_name="discoveredpackage",
name="other_license_expression_spdx",
field=models.TextField(
blank=True,
help_text="The other SPDX license expression for this package converted from its other_license_expression.",
),
),
migrations.AddField(
model_name="codebaseresource",
name="detected_license_expression",
field=models.TextField(blank=True, help_text="The license expression summarizing the license info for this resource, combined from all the license detections"),
),
migrations.AddField(
model_name="codebaseresource",
name="detected_license_expression_spdx",
field=models.TextField(blank=True, help_text="The detected license expression for this file, with SPDX license keys"),
),
migrations.AddField(
model_name="codebaseresource",
name="license_detections",
field=models.JSONField(
blank=True, default=list, help_text="List of license detection details."
),
),
migrations.AddField(
model_name="codebaseresource",
name="license_clues",
field=models.JSONField(
blank=True, default=list, help_text="List of license matches that are not proper detections and potentially just clues to licenses or likely false positives. Those are not included in computing the detected license expression for the resource."
),
),
migrations.AddField(
model_name="codebaseresource",
name="percentage_of_license_text",
field=models.FloatField(blank=True, help_text="Percentage of file words detected as license text or notice.", null=True),
),
]
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/migrations/0030_scancode_toolkit_v32_model_updates.py
| 0.717309 | 0.215289 |
0030_scancode_toolkit_v32_model_updates.py
|
pypi
|
from django.apps import apps
from rest_framework import serializers
from taggit.serializers import TaggitSerializer
from taggit.serializers import TagListSerializerField
from scanpipe.api import ExcludeFromListViewMixin
from scanpipe.models import CodebaseRelation
from scanpipe.models import CodebaseResource
from scanpipe.models import DiscoveredDependency
from scanpipe.models import DiscoveredPackage
from scanpipe.models import Project
from scanpipe.models import ProjectMessage
from scanpipe.models import Run
from scanpipe.pipes import count_group_by
from scanpipe.pipes.fetch import fetch_urls
scanpipe_app = apps.get_app_config("scanpipe")
class SerializerExcludeFieldsMixin:
"""
A Serializer mixin that takes an additional `exclude_fields` argument to
exclude specific fields from the serialized content.
Inspired by https://www.django-rest-framework.org/api-guide/serializers/#example
"""
def __init__(self, *args, **kwargs):
exclude_fields = kwargs.pop("exclude_fields", [])
super().__init__(*args, **kwargs)
for field_name in exclude_fields:
self.fields.pop(field_name)
class PipelineChoicesMixin:
def __init__(self, *args, **kwargs):
"""
Load the pipeline field choices on the init class instead of the module
import, which ensures all pipelines are first properly loaded.
"""
super().__init__(*args, **kwargs)
self.fields["pipeline"].choices = scanpipe_app.get_pipeline_choices()
class OrderedMultipleChoiceField(serializers.MultipleChoiceField):
"""Forcing outputs as list() in place of set() to keep the ordering integrity."""
def to_internal_value(self, data):
if isinstance(data, str):
data = [data]
if not hasattr(data, "__iter__"):
self.fail("not_a_list", input_type=type(data).__name__)
if not self.allow_empty and len(data) == 0:
self.fail("empty")
return [
super(serializers.MultipleChoiceField, self).to_internal_value(item)
for item in data
]
def to_representation(self, value):
return [self.choice_strings_to_values.get(str(item), item) for item in value]
class StrListField(serializers.ListField):
"""ListField that allows also a str as value."""
def to_internal_value(self, data):
if isinstance(data, str):
data = [data]
return super().to_internal_value(data)
class RunSerializer(SerializerExcludeFieldsMixin, serializers.ModelSerializer):
project = serializers.HyperlinkedRelatedField(
view_name="project-detail", read_only=True
)
class Meta:
model = Run
fields = [
"url",
"pipeline_name",
"status",
"description",
"project",
"uuid",
"created_date",
"scancodeio_version",
"task_id",
"task_start_date",
"task_end_date",
"task_exitcode",
"task_output",
"log",
"execution_time",
]
class ProjectSerializer(
ExcludeFromListViewMixin,
PipelineChoicesMixin,
TaggitSerializer,
serializers.ModelSerializer,
):
pipeline = OrderedMultipleChoiceField(
choices=(),
required=False,
write_only=True,
)
execute_now = serializers.BooleanField(
write_only=True,
help_text="Execute pipeline now",
)
upload_file = serializers.FileField(write_only=True, required=False)
input_urls = StrListField(
write_only=True,
required=False,
style={"base_template": "textarea.html"},
)
webhook_url = serializers.CharField(write_only=True, required=False)
next_run = serializers.CharField(source="get_next_run", read_only=True)
runs = RunSerializer(many=True, read_only=True)
input_sources = serializers.JSONField(source="input_sources_list", read_only=True)
codebase_resources_summary = serializers.SerializerMethodField()
discovered_packages_summary = serializers.SerializerMethodField()
discovered_dependencies_summary = serializers.SerializerMethodField()
codebase_relations_summary = serializers.SerializerMethodField()
labels = TagListSerializerField(required=False)
class Meta:
model = Project
fields = (
"name",
"url",
"uuid",
"upload_file",
"input_urls",
"webhook_url",
"created_date",
"is_archived",
"notes",
"labels",
"settings",
"pipeline",
"execute_now",
"input_sources",
"input_root",
"output_root",
"next_run",
"runs",
"extra_data",
"message_count",
"resource_count",
"package_count",
"dependency_count",
"relation_count",
"codebase_resources_summary",
"discovered_packages_summary",
"discovered_dependencies_summary",
"codebase_relations_summary",
)
exclude_from_list_view = [
"settings",
"input_root",
"output_root",
"extra_data",
"message_count",
"resource_count",
"package_count",
"dependency_count",
"relation_count",
"codebase_resources_summary",
"discovered_packages_summary",
"discovered_dependencies_summary",
"codebase_relations_summary",
]
def get_codebase_resources_summary(self, project):
queryset = project.codebaseresources.all()
return count_group_by(queryset, "status")
def get_discovered_packages_summary(self, project):
base_qs = project.discoveredpackages
return {
"total": base_qs.count(),
"with_missing_resources": base_qs.exclude(missing_resources=[]).count(),
"with_modified_resources": base_qs.exclude(modified_resources=[]).count(),
}
def get_discovered_dependencies_summary(self, project):
base_qs = project.discovereddependencies
return {
"total": base_qs.count(),
"is_runtime": base_qs.filter(is_runtime=True).count(),
"is_optional": base_qs.filter(is_optional=True).count(),
"is_resolved": base_qs.filter(is_resolved=True).count(),
}
def get_codebase_relations_summary(self, project):
queryset = project.codebaserelations.all()
return count_group_by(queryset, "map_type")
def create(self, validated_data):
"""
Create a new `project` with `upload_file` and `pipeline` as optional.
The `execute_now` parameter can be set to execute the Pipeline on creation.
Note that even when `execute_now` is True, the pipeline execution is always
delayed after the actual database save and commit of the Project creation
process, using the `transaction.on_commit` callback system.
This ensures the Project data integrity before running any pipelines.
"""
upload_file = validated_data.pop("upload_file", None)
input_urls = validated_data.pop("input_urls", [])
pipeline = validated_data.pop("pipeline", [])
execute_now = validated_data.pop("execute_now", False)
webhook_url = validated_data.pop("webhook_url", None)
downloads, errors = fetch_urls(input_urls)
if errors:
raise serializers.ValidationError("Could not fetch: " + "\n".join(errors))
project = super().create(validated_data)
if upload_file:
project.add_uploads([upload_file])
if downloads:
project.add_downloads(downloads)
if webhook_url:
project.add_webhook_subscription(webhook_url)
for pipeline_name in pipeline:
project.add_pipeline(pipeline_name, execute_now)
return project
class CodebaseResourceSerializer(serializers.ModelSerializer):
for_packages = serializers.JSONField()
compliance_alert = serializers.CharField()
class Meta:
model = CodebaseResource
fields = [
"path",
"type",
"name",
"status",
"tag",
"extension",
"size",
"md5",
"sha1",
"sha256",
"sha512",
"mime_type",
"file_type",
"programming_language",
"is_binary",
"is_text",
"is_archive",
"is_media",
"is_key_file",
"detected_license_expression",
"detected_license_expression_spdx",
"license_detections",
"license_clues",
"percentage_of_license_text",
"compliance_alert",
"copyrights",
"holders",
"authors",
"package_data",
"for_packages",
"emails",
"urls",
"extra_data",
]
class DiscoveredPackageSerializer(serializers.ModelSerializer):
purl = serializers.CharField(source="package_url")
compliance_alert = serializers.CharField()
class Meta:
model = DiscoveredPackage
fields = [
"purl",
"type",
"namespace",
"name",
"version",
"qualifiers",
"subpath",
"primary_language",
"description",
"release_date",
"parties",
"keywords",
"homepage_url",
"download_url",
"bug_tracking_url",
"code_view_url",
"vcs_url",
"repository_homepage_url",
"repository_download_url",
"api_data_url",
"size",
"md5",
"sha1",
"sha256",
"sha512",
"copyright",
"holder",
"declared_license_expression",
"declared_license_expression_spdx",
"license_detections",
"other_license_expression",
"other_license_expression_spdx",
"other_license_detections",
"extracted_license_statement",
"compliance_alert",
"notice_text",
"source_packages",
"extra_data",
"package_uid",
"datasource_id",
"file_references",
"missing_resources",
"modified_resources",
"affected_by_vulnerabilities",
]
class DiscoveredDependencySerializer(serializers.ModelSerializer):
purl = serializers.ReadOnlyField()
for_package_uid = serializers.ReadOnlyField()
datafile_path = serializers.ReadOnlyField()
package_type = serializers.ReadOnlyField(source="type")
class Meta:
model = DiscoveredDependency
fields = [
"purl",
"extracted_requirement",
"scope",
"is_runtime",
"is_optional",
"is_resolved",
"dependency_uid",
"for_package_uid",
"datafile_path",
"datasource_id",
"package_type",
"affected_by_vulnerabilities",
]
class CodebaseRelationSerializer(serializers.ModelSerializer):
from_resource = serializers.ReadOnlyField(source="from_resource.path")
to_resource = serializers.ReadOnlyField(source="to_resource.path")
class Meta:
model = CodebaseRelation
fields = [
"to_resource",
"status",
"map_type",
"score",
"from_resource",
]
class ProjectMessageSerializer(serializers.ModelSerializer):
traceback = serializers.SerializerMethodField()
class Meta:
model = ProjectMessage
fields = [
"uuid",
"severity",
"description",
"model",
"details",
"traceback",
"created_date",
]
def get_traceback(self, project_error):
return project_error.traceback.splitlines()
class PipelineSerializer(PipelineChoicesMixin, serializers.ModelSerializer):
"""Serializer used in the `ProjectViewSet.add_pipeline` action."""
pipeline = serializers.ChoiceField(
choices=(),
required=True,
write_only=True,
)
execute_now = serializers.BooleanField(write_only=True)
class Meta:
model = Run
fields = [
"pipeline",
"execute_now",
]
def get_model_serializer(model_class):
"""Return a Serializer class that ia related to a given `model_class`."""
serializer = {
CodebaseResource: CodebaseResourceSerializer,
DiscoveredPackage: DiscoveredPackageSerializer,
DiscoveredDependency: DiscoveredDependencySerializer,
CodebaseRelation: CodebaseRelationSerializer,
ProjectMessage: ProjectMessageSerializer,
}.get(model_class, None)
if not serializer:
raise LookupError(f"No Serializer found for {model_class}")
return serializer
def get_serializer_fields(model_class):
"""
Return a list of fields declared on the Serializer that are related to the
given `model_class`.
"""
serializer = get_model_serializer(model_class)
fields = list(serializer().get_fields().keys())
return fields
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/api/serializers.py
| 0.848251 | 0.196152 |
serializers.py
|
pypi
|
import json
import re
from contextlib import suppress
from dataclasses import dataclass
from dataclasses import field
from datetime import datetime
from pathlib import Path
from typing import List # Python 3.8 compatibility
SPDX_SPEC_VERSION = "2.3"
SPDX_LICENSE_LIST_VERSION = "3.20"
SPDX_SCHEMA_NAME = "spdx-schema-2.3.json"
SPDX_SCHEMA_PATH = Path(__file__).parent / "schemas" / SPDX_SCHEMA_NAME
SPDX_SCHEMA_URL = (
"https://raw.githubusercontent.com/spdx/spdx-spec/v2.3/schemas/spdx-schema.json"
)
"""
Generate SPDX Documents.
Spec documentation: https://spdx.github.io/spdx-spec/v2.3/
Usage::
import pathlib
from scanpipe.pipes import spdx
creation_info = spdx.CreationInfo(
person_name="John Doe",
person_email="[email protected]",
organization_name="Starship",
tool="SPDXCode-1.0",
)
package1 = spdx.Package(
spdx_id="SPDXRef-package1",
name="lxml",
version="3.3.5",
license_concluded="LicenseRef-1",
checksums=[
spdx.Checksum(
algorithm="SHA1", value="10c72b88de4c5f3095ebe20b4d8afbedb32b8f"
),
spdx.Checksum(algorithm="MD5", value="56770c1a2df6e0dc51c491f0a5b9d865"),
],
external_refs=[
spdx.ExternalRef(
category="PACKAGE-MANAGER",
type="purl",
locator="pkg:pypi/[email protected]",
),
]
)
document = spdx.Document(
name="Document name",
namespace="https://[CreatorWebsite]/[pathToSpdx]/[DocumentName]-[UUID]",
creation_info=creation_info,
packages=[package1],
extracted_licenses=[
spdx.ExtractedLicensingInfo(
license_id="LicenseRef-1",
extracted_text="License Text",
name="License 1",
see_alsos=["https://license1.text"],
),
],
comment="This document was created using SPDXCode-1.0",
)
# Display document content:
print(document.as_json())
# Validate document
schema = pathlib.Path(spdx.SPDX_JSON_SCHEMA_LOCATION).read_text()
document.validate(schema)
# Write document to a file:
with open("document_name.spdx.json", "w") as f:
f.write(document.as_json())
"""
@dataclass
class CreationInfo:
"""
One instance is required for each SPDX file produced.
It provides the necessary information for forward and backward compatibility for
processing tools.
"""
person_name: str = ""
organization_name: str = ""
tool: str = ""
person_email: str = ""
organization_email: str = ""
license_list_version: str = SPDX_LICENSE_LIST_VERSION
comment: str = ""
"""
Identify when the SPDX document was originally created.
The date is to be specified according to combined date and time in UTC format as
specified in ISO 8601 standard.
Format: YYYY-MM-DDThh:mm:ssZ
"""
created: str = field(
default_factory=lambda: datetime.utcnow().isoformat(timespec="seconds") + "Z",
)
def as_dict(self):
"""Return the data as a serializable dict."""
data = {
"created": self.created,
"creators": self.get_creators_spdx(),
}
if self.license_list_version:
data["licenseListVersion"] = self.license_list_version
if self.comment:
data["comment"] = self.comment
return data
@classmethod
def from_data(cls, data):
return cls(
**cls.get_creators_dict(data.get("creators", [])),
license_list_version=data.get("licenseListVersion"),
comment=data.get("comment"),
created=data.get("created"),
)
def get_creators_spdx(self):
"""Return the `creators` list from related field values."""
creators = []
if self.person_name:
creators.append(f"Person: {self.person_name} ({self.person_email})")
if self.organization_name:
creators.append(
f"Organization: {self.organization_name} ({self.organization_email})"
)
if self.tool:
creators.append(f"Tool: {self.tool}")
if not creators:
raise ValueError("Missing values to build `creators` list.")
return creators
@staticmethod
def get_creators_dict(creators_data):
"""Return the `creators` dict from SPDX data."""
creators_dict = {}
for creator in creators_data:
creator_type, value = creator.split(": ")
creator_type = creator_type.lower()
if creator_type == "tool":
creators_dict["tool"] = value
else:
if "(" in value:
name, email = value.split(" (")
creators_dict[f"{creator_type}_name"] = name
creators_dict[f"{creator_type}_email"] = email.split(")")[0]
else:
creators_dict[f"{creator_type}_name"] = value
return creators_dict
@dataclass
class Checksum:
"""
The checksum provides a mechanism that can be used to verify that the contents of
a File or Package have not changed.
"""
algorithm: str
value: str
def as_dict(self):
"""Return the data as a serializable dict."""
return {
"algorithm": self.algorithm.upper(),
"checksumValue": self.value,
}
@classmethod
def from_data(cls, data):
return cls(
algorithm=data.get("algorithm"),
value=data.get("checksumValue"),
)
@dataclass
class ExternalRef:
"""
An External Reference allows a Package to reference an external source of
additional information, metadata, enumerations, asset identifiers, or
downloadable content believed to be relevant to the Package.
"""
category: str # Supported values: OTHER, SECURITY, PERSISTENT-ID, PACKAGE-MANAGER
type: str
locator: str
comment: str = ""
def as_dict(self):
"""Return the data as a serializable dict."""
data = {
"referenceCategory": self.category,
"referenceType": self.type,
"referenceLocator": self.locator,
}
if self.comment:
data["comment"] = self.comment
return data
@classmethod
def from_data(cls, data):
return cls(
category=data.get("referenceCategory"),
type=data.get("referenceType"),
locator=data.get("referenceLocator"),
comment=data.get("comment"),
)
@dataclass
class ExtractedLicensingInfo:
"""
An ExtractedLicensingInfo represents a license or licensing notice that was found
in a package, file or snippet.
Any license text that is recognized as a license may be represented as a License
rather than an ExtractedLicensingInfo.
"""
license_id: str
extracted_text: str
name: str = ""
comment: str = ""
see_alsos: List[str] = field(default_factory=list)
def as_dict(self):
"""Return the data as a serializable dict."""
required_data = {
"licenseId": self.license_id,
"extractedText": self.extracted_text,
}
optional_data = {
"name": self.name,
"comment": self.comment,
"seeAlsos": self.see_alsos,
}
optional_data = {key: value for key, value in optional_data.items() if value}
return {**required_data, **optional_data}
@classmethod
def from_data(cls, data):
return cls(
license_id=data.get("licenseId"),
extracted_text=data.get("extractedText"),
name=data.get("name"),
comment=data.get("comment"),
see_alsos=data.get("seeAlsos"),
)
@dataclass
class Package:
"""Packages referenced in the SPDX document."""
spdx_id: str
name: str
download_location: str = "NOASSERTION"
license_declared: str = "NOASSERTION"
license_concluded: str = "NOASSERTION"
copyright_text: str = "NOASSERTION"
files_analyzed: bool = False
version: str = ""
supplier: str = ""
originator: str = ""
homepage: str = ""
filename: str = ""
description: str = ""
summary: str = ""
source_info: str = ""
release_date: str = ""
built_date: str = ""
valid_until_date: str = ""
# Supported values:
# APPLICATION | FRAMEWORK | LIBRARY | CONTAINER | OPERATING-SYSTEM |
# DEVICE | FIRMWARE | SOURCE | ARCHIVE | FILE | INSTALL | OTHER
primary_package_purpose: str = ""
comment: str = ""
license_comments: str = ""
checksums: List[Checksum] = field(default_factory=list)
external_refs: List[ExternalRef] = field(default_factory=list)
attribution_texts: List[str] = field(default_factory=list)
def as_dict(self):
"""Return the data as a serializable dict."""
spdx_id = str(self.spdx_id)
if not spdx_id.startswith("SPDXRef-"):
spdx_id = f"SPDXRef-{spdx_id}"
required_data = {
"name": self.name,
"SPDXID": spdx_id,
"downloadLocation": self.download_location or "NOASSERTION",
"licenseConcluded": self.license_concluded or "NOASSERTION",
"copyrightText": self.copyright_text or "NOASSERTION",
"filesAnalyzed": self.files_analyzed,
}
optional_data = {
"versionInfo": self.version,
"licenseDeclared": self.license_declared,
"supplier": self.supplier,
"originator": self.originator,
"homepage": self.homepage,
"packageFileName": self.filename,
"description": self.description,
"summary": self.summary,
"sourceInfo": self.source_info,
"releaseDate": self.date_to_iso(self.release_date),
"builtDate": self.date_to_iso(self.built_date),
"validUntilDate": self.date_to_iso(self.valid_until_date),
"primaryPackagePurpose": self.primary_package_purpose,
"comment": self.comment,
"licenseComments": self.license_comments,
"checksums": [checksum.as_dict() for checksum in self.checksums],
"externalRefs": [ref.as_dict() for ref in self.external_refs],
"attributionTexts": self.attribution_texts,
}
optional_data = {key: value for key, value in optional_data.items() if value}
return {**required_data, **optional_data}
@staticmethod
def date_to_iso(date_str):
"""Convert a provided `date_str` to the SPDX format: `YYYY-MM-DDThh:mm:ssZ`."""
if not date_str:
return
if date_str.endswith("Z"):
date_str = date_str[:-1]
as_datetime = datetime.fromisoformat(date_str)
return as_datetime.isoformat(timespec="seconds") + "Z"
@classmethod
def from_data(cls, data):
return cls(
spdx_id=data.get("SPDXID"),
name=data.get("name"),
download_location=data.get("downloadLocation"),
license_concluded=data.get("licenseConcluded"),
copyright_text=data.get("copyrightText"),
version=data.get("versionInfo"),
license_declared=data.get("licenseDeclared"),
supplier=data.get("supplier"),
originator=data.get("originator"),
homepage=data.get("homepage"),
filename=data.get("packageFileName"),
description=data.get("description"),
summary=data.get("summary"),
source_info=data.get("sourceInfo"),
release_date=data.get("releaseDate"),
built_date=data.get("builtDate"),
valid_until_date=data.get("validUntilDate"),
primary_package_purpose=data.get("primaryPackagePurpose"),
comment=data.get("comment"),
license_comments=data.get("licenseComments"),
attribution_texts=data.get("attributionTexts"),
checksums=[
Checksum.from_data(checksum_data)
for checksum_data in data.get("checksums", [])
],
external_refs=[
ExternalRef.from_data(external_ref_data)
for external_ref_data in data.get("externalRefs", [])
],
)
@dataclass
class File:
"""Files referenced in the SPDX document."""
spdx_id: str
name: str
checksums: List[Checksum] = field(default_factory=list)
license_concluded: str = "NOASSERTION"
copyright_text: str = "NOASSERTION"
license_in_files: List[str] = field(default_factory=list)
contributors: List[str] = field(default_factory=list)
notice_text: str = ""
# Supported values:
# SOURCE | BINARY | ARCHIVE | APPLICATION | AUDIO | IMAGE | TEXT | VIDEO |
# DOCUMENTATION | SPDX | OTHER
types: List[str] = field(default_factory=list)
attribution_texts: List[str] = field(default_factory=list)
comment: str = ""
license_comments: str = ""
def as_dict(self):
"""Return the data as a serializable dict."""
required_data = {
"SPDXID": self.spdx_id,
"fileName": self.name,
"checksums": [checksum.as_dict() for checksum in self.checksums],
}
optional_data = {
"fileTypes": self.types,
"copyrightText": self.copyright_text or "NOASSERTION",
"fileContributors": self.contributors,
"licenseConcluded": self.license_concluded or "NOASSERTION",
"licenseInfoInFiles": self.license_in_files,
"noticeText": self.notice_text,
"comment": self.comment,
"licenseComments": self.license_comments,
"attributionTexts": self.attribution_texts,
}
optional_data = {key: value for key, value in optional_data.items() if value}
return {**required_data, **optional_data}
@classmethod
def from_data(cls, data):
return cls(
spdx_id=data.get("SPDXID"),
name=data.get("fileName"),
checksums=[
Checksum.from_data(checksum_data)
for checksum_data in data.get("checksums", [])
],
types=data.get("fileTypes"),
copyright_text=data.get("copyrightText"),
contributors=data.get("fileContributors"),
license_concluded=data.get("licenseConcluded"),
license_in_files=data.get("licenseInfoInFiles"),
notice_text=data.get("noticeText"),
comment=data.get("comment"),
license_comments=data.get("licenseComments"),
attribution_texts=data.get("attributionTexts"),
)
@dataclass
class Relationship:
"""
Represent the relationship between two SPDX elements.
For example, you can represent a relationship between two different Files,
between a Package and a File, between two Packages,
or between one SPDXDocument and another SPDXDocument.
"""
spdx_id: str
related_spdx_id: str
relationship: str
comment: str = ""
def as_dict(self):
"""Return the SPDX relationship as a serializable dict."""
data = {
"spdxElementId": self.spdx_id,
"relatedSpdxElement": self.related_spdx_id,
"relationshipType": self.relationship,
}
if self.comment:
data["comment"] = self.comment
return data
@classmethod
def from_data(cls, data):
return cls(
spdx_id=data.get("spdxElementId"),
related_spdx_id=data.get("relatedSpdxElement"),
relationship=data.get("relationshipType"),
comment=data.get("comment"),
)
@dataclass
class Document:
"""
Collection of section instances each of which contains information about software
organized using the SPDX format.
"""
name: str
namespace: str
creation_info: CreationInfo
packages: List[Package]
spdx_id: str = "SPDXRef-DOCUMENT"
version: str = SPDX_SPEC_VERSION
data_license: str = "CC0-1.0"
comment: str = ""
files: List[File] = field(default_factory=list)
extracted_licenses: List[ExtractedLicensingInfo] = field(default_factory=list)
relationships: List[Relationship] = field(default_factory=list)
def as_dict(self):
"""Return the SPDX document as a serializable dict."""
data = {
"spdxVersion": f"SPDX-{self.version}",
"dataLicense": self.data_license,
"SPDXID": self.spdx_id,
"name": self.safe_document_name(self.name),
"documentNamespace": self.namespace,
"creationInfo": self.creation_info.as_dict(),
"packages": [package.as_dict() for package in self.packages],
"documentDescribes": [package.spdx_id for package in self.packages],
}
if self.files:
data["files"] = [file.as_dict() for file in self.files]
if self.extracted_licenses:
data["hasExtractedLicensingInfos"] = [
license_info.as_dict() for license_info in self.extracted_licenses
]
if self.relationships:
data["relationships"] = [
relationship.as_dict() for relationship in self.relationships
]
if self.comment:
data["comment"] = self.comment
return data
def as_json(self, indent=2):
"""Return the SPDX document as serialized JSON."""
return json.dumps(self.as_dict(), indent=indent)
@classmethod
def from_data(cls, data):
return cls(
spdx_id=data.get("SPDXID"),
version=data.get("spdxVersion", "").split("SPDX-")[-1],
data_license=data.get("dataLicense"),
name=data.get("name"),
namespace=data.get("documentNamespace"),
creation_info=CreationInfo.from_data(data.get("creationInfo", {})),
packages=[
Package.from_data(package_data)
for package_data in data.get("packages", [])
],
files=[File.from_data(file_data) for file_data in data.get("files", [])],
extracted_licenses=[
ExtractedLicensingInfo.from_data(license_info_data)
for license_info_data in data.get("hasExtractedLicensingInfos", [])
],
relationships=[
Relationship.from_data(relationship_data)
for relationship_data in data.get("relationships", [])
],
comment=data.get("comment"),
)
@staticmethod
def safe_document_name(name):
"""Convert provided `name` to a safe SPDX document name."""
return re.sub("[^A-Za-z0-9.]+", "_", name).lower()
def validate(self, schema):
"""Check the validity of this SPDX document."""
return validate_document(document=self.as_dict(), schema=schema)
def validate_document(document, schema=SPDX_SCHEMA_PATH):
"""
SPDX document validation.
Requires the `jsonschema` library.
"""
try:
import jsonschema
except ModuleNotFoundError:
print(
"The `jsonschema` library is required to run the validation.\n"
"Install with: `pip install jsonschema`"
)
raise
if isinstance(document, str):
document = json.loads(document)
if isinstance(document, Document):
document = document.as_dict()
if isinstance(schema, Path):
schema = schema.read_text()
if isinstance(schema, str):
schema = json.loads(schema)
jsonschema.validate(instance=document, schema=schema)
def is_spdx_document(input_location):
"""Return True if the file at `input_location` is a SPDX Document."""
with suppress(Exception):
data = json.loads(Path(input_location).read_text())
if data.get("SPDXID"):
return True
return False
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/spdx.py
| 0.770206 | 0.225193 |
spdx.py
|
pypi
|
import re
from pathlib import Path
java_package_re = re.compile(r"^\s*package\s+([\w\.]+)\s*;")
def get_java_package(location, java_extensions=(".java",), **kwargs):
"""
Return a Java package as a mapping with a single "java_package" key, or ``None``
from the .java source code file at ``location``.
Only look at files with an extension in the ``java_extensions`` tuple.
Note: this is the same API as a ScanCode Toolkit API scanner function by
design.
"""
if not location:
return
if not isinstance(location, Path):
location = Path(location)
if location.suffix not in java_extensions:
return
with open(location) as lines:
return find_java_package(lines)
def find_java_package(lines):
"""
Return a mapping of ``{'java_package': <value>}`` or ``None`` from an iterable or
text ``lines``.
For example::
>>> lines = [" package foo.back ; # dsasdasdasdasdasda.asdasdasd"]
>>> assert find_java_package(lines) == {"java_package": "foo.back"}
"""
package = _find_java_package(lines)
if package:
return {"java_package": package}
def _find_java_package(lines):
"""
Return a Java package or ``None`` from an iterable or text ``lines``.
For example::
>>> lines = [" package foo.back ; # dsasdasdasdasdasda.asdasdasd"]
>>> assert _find_java_package(lines) == "foo.back", _find_java_package(lines)
"""
for ln, line in enumerate(lines):
# only look at the first 500 lines
if ln > 500:
return
for package in java_package_re.findall(line):
if package:
return package
def get_normalized_java_path(path):
"""
Return a normalized .java file path for ``path`` .class file path string.
Account for inner classes in that their .java file name is the name of their
outer class.
For example::
>>> get_normalized_java_path("foo/org/common/Bar$inner.class")
'foo/org/common/Bar.java'
>>> get_normalized_java_path("foo/org/common/Bar.class")
'foo/org/common/Bar.java'
"""
if not path.endswith(".class"):
raise ValueError("Only path ending with .class are supported.")
path = Path(path.strip("/"))
class_name = path.name
if "$" in class_name: # inner class
class_name, _, _ = class_name.partition("$")
else:
class_name, _, _ = class_name.partition(".") # plain .class
return str(path.parent / f"{class_name}.java")
def get_fully_qualified_java_path(java_package, filename):
"""
Return a fully qualified java path of a .java ``filename`` in a
``java_package`` string.
Note that we use "/" as path separators.
For example::
>>> get_fully_qualified_java_path("org.common" , "Bar.java")
'org/common/Bar.java'
"""
java_package = java_package.replace(".", "/")
return f"{java_package}/{filename}"
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/jvm.py
| 0.82734 | 0.235911 |
jvm.py
|
pypi
|
from commoncode.resource import VirtualCodebase
from scanpipe.models import Project
def sort_by_lower_name(resource):
return resource["name"].lower()
def get_resource_fields(resource, fields):
"""Return a mapping of fields from `fields` and values from `resource`"""
return {field: getattr(resource, field) for field in fields}
def get_resource_tree(resource, fields, codebase=None, seen_resources=set()):
"""
Return a tree as a dictionary structure starting from the provided `resource`.
The following classes are supported for the input `resource` object:
- scanpipe.models.CodebaseResource
- commoncode.resource.Resource
The data included for each child is controlled with the `fields` argument.
The `codebase` is only required in the context of a commoncode `Resource`
input.
`seen_resources` is used when get_resource_tree() is used in the context of
get_codebase_tree(). We keep track of child Resources we visit in
`seen_resources`, so we don't visit them again in get_codebase_tree().
"""
resource_dict = get_resource_fields(resource, fields)
if resource.is_dir:
children = []
for child in resource.children(codebase):
seen_resources.add(child.path)
children.append(get_resource_tree(child, fields, codebase, seen_resources))
if children:
resource_dict["children"] = sorted(children, key=sort_by_lower_name)
return resource_dict
def get_codebase_tree(codebase, fields):
"""
Return a tree as a dictionary structure starting from the root resources of
the provided `codebase`.
The following classes are supported for the input `codebase` object:
- scanpipe.pipes.codebase.ProjectCodebase
- commoncode.resource.Codebase
- commoncode.resource.VirtualCodebase
The data included for each child is controlled with the `fields` argument.
"""
seen_resources = set()
codebase_dict = dict(children=[])
for resource in codebase.walk():
path = resource.path
if path in seen_resources:
continue
else:
seen_resources.add(path)
resource_dict = get_resource_fields(resource, fields)
if resource.is_dir:
children = []
for child in resource.children(codebase):
seen_resources.add(child.path)
children.append(
get_resource_tree(child, fields, codebase, seen_resources)
)
if children:
resource_dict["children"] = sorted(children, key=sort_by_lower_name)
codebase_dict["children"].append(resource_dict)
return codebase_dict
def get_basic_virtual_codebase(resources_qs):
"""
Return a VirtualCodebase created from CodebaseResources in `resources_qs`.
The only Resource fields that are populated are path, sha1, size, and
is_file. This is intended for use with
scanpipe.pipes.matchcode.fingerprint_codebase_directories
"""
resources = [
{"path": r.path, "sha1": r.sha1, "size": r.size, "is_file": r.is_file}
for r in resources_qs
]
return VirtualCodebase(location={"files": resources})
class ProjectCodebase:
"""
Represents the codebase of a project stored in the database.
A Codebase is a tree of Resources.
"""
project = None
def __init__(self, project):
if not isinstance(project, Project):
raise ValueError("Provided value for project is not a Project instance.")
self.project = project
@property
def root_resources(self):
return self.project.codebaseresources.exclude(path__contains="/")
@property
def resources(self):
return self.project.codebaseresources.all()
def walk(self, topdown=True):
for root_resource in self.root_resources:
if topdown:
yield root_resource
for resource in root_resource.walk(topdown=topdown):
yield resource
if not topdown:
yield root_resource
def get_tree(self):
return get_codebase_tree(self, fields=["name", "path"])
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/codebase.py
| 0.874426 | 0.354489 |
codebase.py
|
pypi
|
import concurrent.futures
import json
import logging
import multiprocessing
import os
import shlex
from collections import defaultdict
from functools import partial
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.db.models import ObjectDoesNotExist
from commoncode import fileutils
from commoncode.resource import VirtualCodebase
from extractcode import api as extractcode_api
from packagedcode import get_package_handler
from packagedcode import models as packagedcode_models
from scancode import Scanner
from scancode import api as scancode_api
from scancode import cli as scancode_cli
from scancode.cli import run_scan as scancode_run_scan
from scanpipe import pipes
from scanpipe.models import CodebaseResource
from scanpipe.pipes import flag
logger = logging.getLogger("scanpipe.pipes")
"""
Utilities to deal with ScanCode toolkit features and objects.
"""
scanpipe_app = apps.get_app_config("scanpipe")
def get_max_workers(keep_available):
"""
Return the `SCANCODEIO_PROCESSES` if defined in the setting,
or returns a default value based on the number of available CPUs,
minus the provided `keep_available` value.
On operating system where the multiprocessing start method is not "fork",
but for example "spawn", such as on macOS, multiprocessing and threading are
disabled by default returning -1 `max_workers`.
"""
processes = settings.SCANCODEIO_PROCESSES
if processes is not None:
return processes
if multiprocessing.get_start_method() != "fork":
return -1
max_workers = os.cpu_count() - keep_available
if max_workers < 1:
return 1
return max_workers
def extract_archive(location, target):
"""
Extract a single archive or compressed file at `location` to the `target`
directory.
Return a list of extraction errors.
Wrapper of the `extractcode.api.extract_archive` function.
"""
errors = []
for event in extractcode_api.extract_archive(location, target):
if event.done:
errors.extend(event.errors)
return errors
def extract_archives(location, recurse=False):
"""
Extract all archives at `location` and return errors.
Archives and compressed files are extracted in a new directory named
"<file_name>-extract" created in the same directory as each extracted
archive.
If `recurse` is True, extract nested archives-in-archives recursively.
Return a list of extraction errors.
Wrapper of the `extractcode.api.extract_archives` function.
"""
options = {
"recurse": recurse,
"replace_originals": False,
"all_formats": True,
}
errors = []
for event in extractcode_api.extract_archives(location, **options):
if event.done:
errors.extend(event.errors)
return errors
def get_resource_info(location):
"""Return a mapping suitable for the creation of a new CodebaseResource."""
file_info = {}
location_path = Path(location)
is_symlink = location_path.is_symlink()
is_file = location_path.is_file()
if is_symlink:
resource_type = CodebaseResource.Type.SYMLINK
file_info["status"] = "symlink"
elif is_file:
resource_type = CodebaseResource.Type.FILE
else:
resource_type = CodebaseResource.Type.DIRECTORY
file_info.update(
{
"type": resource_type,
"name": fileutils.file_name(location),
"extension": fileutils.file_extension(location),
}
)
if is_symlink:
return file_info
# Missing fields on CodebaseResource model returned by `get_file_info`.
unsupported_fields = [
"is_source",
"is_script",
"date",
]
other_info = scancode_api.get_file_info(location)
# Skip unsupported_fields
# Skip empty values to avoid null vs. '' conflicts
other_info = {
field_name: value
for field_name, value in other_info.items()
if field_name not in unsupported_fields and value
}
file_info.update(other_info)
return file_info
def _scan_resource(
location,
scanners,
with_threading=True,
timeout=settings.SCANCODEIO_SCAN_FILE_TIMEOUT,
):
"""
Wrap the scancode-toolkit `scan_resource` method to support timeout on direct
scanner functions calls.
Return a dictionary of scan `results` and a list of `errors`.
The `with_threading` needs to be enabled for the timeouts support.
"""
# `rid` is not needed in this context, yet required in the scan_resource args
location_rid = location, 0
_, _, errors, _, results, _ = scancode_cli.scan_resource(
location_rid,
scanners,
timeout=timeout,
with_threading=with_threading,
)
return results, errors
def scan_file(location, with_threading=True, min_license_score=0, **kwargs):
"""
Run a license, copyright, email, and url scan on a provided `location`,
using the scancode-toolkit direct API.
Return a dictionary of scan `results` and a list of `errors`.
"""
scancode_get_licenses = partial(
scancode_api.get_licenses,
min_score=min_license_score,
include_text=True,
)
scanners = [
Scanner("copyrights", scancode_api.get_copyrights),
Scanner("licenses", scancode_get_licenses),
Scanner("emails", scancode_api.get_emails),
Scanner("urls", scancode_api.get_urls),
]
return _scan_resource(location, scanners, with_threading=with_threading)
def scan_for_package_data(location, with_threading=True, **kwargs):
"""
Run a package scan on provided `location` using the scancode-toolkit direct API.
Return a dict of scan `results` and a list of `errors`.
"""
scanners = [
Scanner("package_data", scancode_api.get_package_data),
]
return _scan_resource(location, scanners, with_threading=with_threading)
def save_scan_file_results(codebase_resource, scan_results, scan_errors):
"""
Save the resource scan file results in the database.
Create project errors if any occurred during the scan.
"""
status = flag.SCANNED
if scan_errors:
codebase_resource.add_errors(scan_errors)
status = flag.SCANNED_WITH_ERROR
codebase_resource.set_scan_results(scan_results, status)
def save_scan_package_results(codebase_resource, scan_results, scan_errors):
"""
Save the resource scan package results in the database.
Create project errors if any occurred during the scan.
"""
if package_data := scan_results.get("package_data", []):
codebase_resource.update(
package_data=package_data,
status=flag.APPLICATION_PACKAGE,
)
if scan_errors:
codebase_resource.add_errors(scan_errors)
codebase_resource.update(status=flag.SCANNED_WITH_ERROR)
def _log_progress(scan_func, resource, resource_count, index):
progress = f"{index / resource_count * 100:.1f}% ({index}/{resource_count})"
logger.info(f"{scan_func.__name__} {progress} completed pk={resource.pk}")
def _scan_and_save(resource_qs, scan_func, save_func, scan_func_kwargs=None):
"""
Run the `scan_func` on the codebase resources if the provided `resource_qs`.
The `save_func` is called to save the results.
Multiprocessing is enabled by default on this pipe, the number of processes can be
controlled through the `SCANCODEIO_PROCESSES` setting.
Multiprocessing can be disabled using `SCANCODEIO_PROCESSES=0`,
and threading can also be disabled `SCANCODEIO_PROCESSES=-1`
The codebase resources QuerySet is chunked in 2000 results at the time,
this can result in a significant reduction in memory usage.
Note that all database related actions are executed in this main process as the
database connection does not always fork nicely in the pool processes.
"""
if not scan_func_kwargs:
scan_func_kwargs = {}
resource_count = resource_qs.count()
logger.info(f"Scan {resource_count} codebase resources with {scan_func.__name__}")
resource_iterator = resource_qs.iterator(chunk_size=2000)
max_workers = get_max_workers(keep_available=1)
if max_workers <= 0:
with_threading = False if max_workers == -1 else True
for index, resource in enumerate(resource_iterator):
_log_progress(scan_func, resource, resource_count, index)
scan_results, scan_errors = scan_func(
resource.location, with_threading, **scan_func_kwargs
)
save_func(resource, scan_results, scan_errors)
return
logger.info(f"Starting ProcessPoolExecutor with {max_workers} max_workers")
with concurrent.futures.ProcessPoolExecutor(max_workers) as executor:
future_to_resource = {
executor.submit(scan_func, resource.location): resource
for resource in resource_iterator
}
# Iterate over the Futures as they complete (finished or cancelled)
future_as_completed = concurrent.futures.as_completed(future_to_resource)
for index, future in enumerate(future_as_completed, start=1):
resource = future_to_resource[future]
_log_progress(scan_func, resource, resource_count, index)
scan_results, scan_errors = future.result()
save_func(resource, scan_results, scan_errors)
def scan_for_files(project, resource_qs=None):
"""
Run a license, copyright, email, and url scan on files without a status for
a `project`.
Multiprocessing is enabled by default on this pipe, the number of processes can be
controlled through the SCANCODEIO_PROCESSES setting.
"""
# Checking for None to make the distinction with an empty resource_qs queryset
if resource_qs is None:
resource_qs = project.codebaseresources.no_status()
scan_func_kwargs = {}
if license_score := project.get_env("scancode_license_score"):
scan_func_kwargs["min_license_score"] = license_score
_scan_and_save(resource_qs, scan_file, save_scan_file_results, scan_func_kwargs)
def scan_for_application_packages(project):
"""
Run a package scan on files without a status for a `project`,
then create DiscoveredPackage and DiscoveredDependency instances
from the detected package data
Multiprocessing is enabled by default on this pipe, the number of processes can be
controlled through the SCANCODEIO_PROCESSES setting.
"""
resource_qs = project.codebaseresources.no_status()
# Collect detected Package data and save it to the CodebaseResource it was
# detected from.
_scan_and_save(
resource_qs=resource_qs,
scan_func=scan_for_package_data,
save_func=save_scan_package_results,
)
# Iterate through CodebaseResources with Package data and handle them using
# the proper Package handler from packagedcode.
assemble_packages(project=project)
def add_resource_to_package(package_uid, resource, project):
"""
Relate a DiscoveredPackage to `resource` from `project` using `package_uid`.
Add a ProjectMessage when the DiscoveredPackage could not be fetched using the
provided `package_uid`.
"""
if not package_uid:
return
resource_package = resource.discovered_packages.filter(package_uid=package_uid)
if resource_package.exists():
return
try:
package = project.discoveredpackages.get(package_uid=package_uid)
except ObjectDoesNotExist as error:
details = {
"package_uid": str(package_uid),
"resource": str(resource),
}
project.add_error(error, model="assemble_package", details=details)
return
resource.discovered_packages.add(package)
def assemble_packages(project):
"""
Create instances of DiscoveredPackage and DiscoveredDependency for `project`
from the parsed package data present in the CodebaseResources of `project`.
"""
logger.info(f"Project {project} assemble_packages:")
seen_resource_paths = set()
for resource in project.codebaseresources.has_package_data():
if resource.path in seen_resource_paths:
continue
logger.info(f" Processing: {resource.path}")
for package_mapping in resource.package_data:
pd = packagedcode_models.PackageData.from_dict(mapping=package_mapping)
logger.info(f" Package data: {pd.purl}")
handler = get_package_handler(pd)
logger.info(f" Selected package handler: {handler.__name__}")
items = handler.assemble(
package_data=pd,
resource=resource,
codebase=project,
package_adder=add_resource_to_package,
)
for item in items:
logger.info(f" Processing item: {item}")
if isinstance(item, packagedcode_models.Package):
package_data = item.to_dict()
pipes.update_or_create_package(project, package_data)
elif isinstance(item, packagedcode_models.Dependency):
dependency_data = item.to_dict()
pipes.update_or_create_dependency(project, dependency_data)
elif isinstance(item, CodebaseResource):
seen_resource_paths.add(item.path)
else:
logger.info(f"Unknown Package assembly item type: {item!r}")
def get_pretty_params(args):
"""Format provided ``args`` for the ``pretty_params`` run_scan argument."""
return {f"--{key.replace('_', '-')}": value for key, value in args.items()}
def run_scan(location, output_file, run_scan_args):
"""
Scan the `location` content and write the results into an `output_file`.
If `raise_on_error` is enabled, a ScancodeError will be raised if an error occurs
during the scan.
"""
run_args = settings.SCANCODE_TOOLKIT_RUN_SCAN_ARGS.copy()
# The run_scan_args should override any values provided in the settings
run_args.update(run_scan_args)
if "timeout" in run_args:
run_args["timeout"] = int(run_args.get("timeout"))
success, results = scancode_run_scan(
input=shlex.quote(location),
processes=get_max_workers(keep_available=1),
quiet=True,
verbose=False,
return_results=True,
echo_func=None,
pretty_params=get_pretty_params(run_args),
**run_args,
)
if success:
Path(output_file).write_text(json.dumps(results, indent=2))
return
errors = {}
for file in results.get("files", []):
if scan_errors := file.get("scan_errors"):
errors[file.get("path")] = scan_errors
return errors
def get_virtual_codebase(project, input_location):
"""
Return a ScanCode virtual codebase built from the JSON scan file located at
the `input_location`.
"""
temp_path = project.tmp_path / "scancode-temp-resource-cache"
temp_path.mkdir(parents=True, exist_ok=True)
return VirtualCodebase(input_location, temp_dir=str(temp_path), max_in_memory=0)
def create_codebase_resources(project, scanned_codebase):
"""
Save the resources of a ScanCode `scanned_codebase` scancode.resource.Codebase
object to the database as a CodebaseResource of the `project`.
This function can be used to expend an existing `project` Codebase with new
CodebaseResource objects as the existing objects (based on the `path`) will be
skipped.
"""
for scanned_resource in scanned_codebase.walk(skip_root=True):
resource_data = {}
for field in CodebaseResource._meta.fields:
# Do not include the path as provided by the scanned_resource since it
# includes the "root". The `get_path` method is used instead.
if field.name == "path":
continue
value = getattr(scanned_resource, field.name, None)
if value is not None:
resource_data[field.name] = value
resource_type = "FILE" if scanned_resource.is_file else "DIRECTORY"
resource_data["type"] = CodebaseResource.Type[resource_type]
resource_path = scanned_resource.get_path(strip_root=True)
codebase_resource, _ = CodebaseResource.objects.get_or_create(
project=project,
path=resource_path,
defaults=resource_data,
)
for_packages = getattr(scanned_resource, "for_packages", [])
for package_uid in for_packages:
logger.debug(f"Assign {package_uid} to {codebase_resource}")
package = project.discoveredpackages.get(package_uid=package_uid)
set_codebase_resource_for_package(
codebase_resource=codebase_resource,
discovered_package=package,
)
def create_discovered_packages(project, scanned_codebase):
"""
Save the packages of a ScanCode `scanned_codebase` scancode.resource.Codebase
object to the database as a DiscoveredPackage of `project`.
"""
if hasattr(scanned_codebase.attributes, "packages"):
for package_data in scanned_codebase.attributes.packages:
pipes.update_or_create_package(project, package_data)
def create_discovered_dependencies(
project, scanned_codebase, strip_datafile_path_root=False
):
"""
Save the dependencies of a ScanCode `scanned_codebase` scancode.resource.Codebase
object to the database as a DiscoveredDependency of `project`.
If `strip_datafile_path_root` is True, then
`DiscoveredDependency.create_from_data()` will strip the root path segment
from the `datafile_path` of `dependency_data` before looking up the
corresponding CodebaseResource for `datafile_path`. This is used in the case
where Dependency data is imported from a scancode-toolkit scan, where the
root path segments are not stripped for `datafile_path`.
"""
if hasattr(scanned_codebase.attributes, "dependencies"):
for dependency_data in scanned_codebase.attributes.dependencies:
pipes.update_or_create_dependency(
project,
dependency_data,
strip_datafile_path_root=strip_datafile_path_root,
)
def set_codebase_resource_for_package(codebase_resource, discovered_package):
"""
Assign the `discovered_package` to the `codebase_resource` and set its
status to "application-package".
"""
codebase_resource.add_package(discovered_package)
codebase_resource.update(status=flag.APPLICATION_PACKAGE)
def get_detection_data(detection_entry):
license_expression = detection_entry.get("license_expression")
identifier = detection_entry.get("identifier")
matches = []
for match in detection_entry.get("matches", []):
match_license_expression = match.get("license_expression")
# Do not include those match.expression when not part of this detection
# entry license_expression as those are not counted in the summary
if match_license_expression in license_expression:
matches.append(
{
"license_expression": match_license_expression,
"matched_text": match.get("matched_text"),
}
)
return {
"license_expression": license_expression,
"identifier": identifier,
"matches": matches,
}
def get_license_matches_grouped(project):
"""
Return a dictionary of all license_matches of a given ``project`` grouped by
``resource.detected_license_expression``.
"""
resources_with_license = project.codebaseresources.has_license_detections()
license_matches = defaultdict(dict)
for resource in resources_with_license:
matches = [
get_detection_data(detection_entry)
for detection_entry in resource.license_detections
]
license_matches[resource.detected_license_expression][resource.path] = matches
return dict(license_matches)
def make_results_summary(project, scan_results_location):
"""
Extract selected sections of the Scan results, such as the `summary`
`license_clarity_score`, and `license_matches` related data.
The `key_files` are also collected and injected in the `summary` output.
"""
from scanpipe.api.serializers import CodebaseResourceSerializer
from scanpipe.api.serializers import DiscoveredPackageSerializer
with open(scan_results_location) as f:
scan_data = json.load(f)
summary = scan_data.get("summary")
# Inject the generated `license_matches` in the summary from the project
# codebase resources.
summary["license_matches"] = get_license_matches_grouped(project)
# Inject the `key_files` and their file content in the summary
key_files = []
key_files_qs = project.codebaseresources.filter(is_key_file=True, is_text=True)
for resource in key_files_qs:
resource_data = CodebaseResourceSerializer(resource).data
resource_data["content"] = resource.file_content
key_files.append(resource_data)
summary["key_files"] = key_files
# Inject the `key_files_packages` filtered from the key_files_qs
key_files_packages_qs = project.discoveredpackages.filter(
codebase_resources__in=key_files_qs
).distinct()
summary["key_files_packages"] = [
DiscoveredPackageSerializer(package).data for package in key_files_packages_qs
]
return summary
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/scancode.py
| 0.697918 | 0.156749 |
scancode.py
|
pypi
|
from typing import NamedTuple
import ahocorasick
"""
Path matching using Aho-Corasick automatons.
The approach is to create a trie of all reversed path suffixes aka. subpath each
mapped to a tuple of:
(subpath length, [list of path ids]).
And then search this index using Aho-Corasick search.
For instance with this list of path ids and paths:
1 RouterStub.java
23 samples/screenshot.png
3 samples/JGroups/src/RouterStub.java
42 src/screenshot.png
We will create this list of inverted subpaths:
RouterStub.java
screenshot.png
screenshot.png/samples
RouterStub.java/JGroups/src/samples
RouterStub.java/JGroups/src
RouterStub.java/JGroups
RouterStub.java
screenshot.png
screenshot.png/src
And we will have this index:
inverted path -> (number of segments, [list of path ids])
RouterStub.java -> (1, [1, 3])
screenshot.png -> (1, [23, 42])
screenshot.png/samples -> (2, [23])
RouterStub.java/JGroups/src/samples -> (4, [3])
RouterStub.java/JGroups/src -> (3, [3])
RouterStub.java/JGroups -> (3, [3])
screenshot.png/src -> (2, [42])
"""
class Match(NamedTuple):
# number of matched path segments
matched_path_length: int
resource_ids: list
def find_paths(path, index):
"""
Return a Match for the longest paths matched in the ``index`` automaton for
a POSIX ``path`` string.
Return None if there is not matching paths found.
"""
segments = get_reversed_path_segments(path)
reversed_path = convert_segments_to_path(segments)
# We use iter_long() to get the longest matches
matches = list(index.iter_long(reversed_path))
if not matches:
return
# Filter after to keep only one match per path which is always the match
# matching the suffix of the path and not something in the middle
good_match = matches[0]
_, (matched_length, resource_ids) = good_match
return Match(matched_length, resource_ids)
def build_index(resource_id_and_paths, with_subpaths=True):
"""
Return an index (an index) built from a ``resource_id_and_paths``
iterable of tuples of (resource_id int, resource_path string).
If `with_subpaths`` is True, index all suffixes of the paths, other index
and match only each complete path.
For example, for the path "samples/JGroups/src/RouterStub.java", the
suffixes are:
samples/JGroups/src/RouterStub.java
JGroups/src/RouterStub.java
src/RouterStub.java
RouterStub.java
"""
# create a new empty automaton.
index = ahocorasick.Automaton(ahocorasick.STORE_ANY, ahocorasick.KEY_STRING)
for resource_id, resource_path in resource_id_and_paths:
segments = get_reversed_path_segments(resource_path)
segments_count = len(segments)
if with_subpaths:
add_subpaths(resource_id, segments, segments_count, index)
else:
add_path(resource_id, segments, segments_count, index)
index.make_automaton()
return index
def add_path(resource_id, segments, segments_count, index):
"""
Add the ``resource_id`` path represented by its list of reversed path
``segments`` with ``segments_count`` segments to the ``index`` automaton.
"""
indexable_path = convert_segments_to_path(segments)
existing = index.get(indexable_path, None)
if existing:
# For multiple identical path suffixes, append to the list of
# resource_ids
_seg_count, resource_ids = existing
resource_ids.append(resource_id)
else:
# We store this value mapped to a indexable_path as a tuple of
# (segments count, [list of resource ids])
value = segments_count, [resource_id]
index.add_word(indexable_path, value)
def add_subpaths(resource_id, segments, segments_count, index):
"""
Add all the ``resource_id`` subpaths "suffixes" of the resource path as
represented by its list of reversed path ``segments`` with
``segments_count`` segments to the ``index`` automaton.
"""
for segment_count in range(segments_count):
subpath_segments_count = segment_count + 1
subpath_segments = segments[:subpath_segments_count]
add_path(
resource_id=resource_id,
segments=subpath_segments,
segments_count=subpath_segments_count,
index=index,
)
def get_reversed_path_segments(path):
"""
Return reversed segments list given a POSIX ``path`` string. We reverse
based on path segments separated by a "/".
Note that the inputh ``path`` is assumed to be normalized, not relative and
not containing double slash.
For example::
>>> assert get_reversed_path_segments("a/b/c.js") == ["c.js", "b", "a"]
"""
# [::-1] does the list reversing
reversed_segments = path.strip("/").split("/")[::-1]
return reversed_segments
def convert_segments_to_path(segments):
"""
Return a path string is suitable for indexing or matching given a
``segments`` sequence of path segment strings.
The resulting reversed path is prefixed and suffixed by a "/" irrespective
of whether the original path is a file or directory and had such prefix or
suffix.
For example::
>>> assert convert_segments_to_path(["c.js", "b", "a"]) == "/c.js/b/a/"
"""
return "/" + "/".join(segments) + "/"
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/pathmap.py
| 0.901086 | 0.569553 |
pathmap.py
|
pypi
|
import logging
import posixpath
from collections import namedtuple
from pathlib import Path
from container_inspector.image import Image
from container_inspector.utils import extract_tar
from scanpipe import pipes
from scanpipe.pipes import flag
from scanpipe.pipes import rootfs
logger = logging.getLogger(__name__)
def get_tarballs_from_inputs(project):
"""
Return the tarballs from the `project` input/ work directory.
Supported file extensions: `.tar`, `.tar.gz`, `.tgz`.
"""
return [
tarball
for pattern in ("*.tar*", "*.tgz")
for tarball in project.inputs(pattern=pattern)
]
def extract_images_from_inputs(project):
"""
Collect all the tarballs from the `project` input/ work directory, extracts
each tarball to the tmp/ work directory and collects the images.
Return the `images` and an `errors` list of error messages that may have
happened during the extraction.
"""
target_path = project.tmp_path
images = []
errors = []
for tarball in get_tarballs_from_inputs(project):
extract_target = target_path / f"{tarball.name}-extract"
imgs, errs = extract_image_from_tarball(tarball, extract_target)
images.extend(imgs)
errors.extend(errs)
return images, errors
def extract_image_from_tarball(input_tarball, extract_target, verify=True):
"""
Extract images from an ``input_tarball`` to an ``extract_target`` directory
Path object and collects the extracted images.
Return the `images` and an `errors` list of error messages that may have
happened during the extraction.
"""
errors = extract_tar(
location=input_tarball,
target_dir=extract_target,
skip_symlinks=False,
as_events=False,
)
images = Image.get_images_from_dir(
extracted_location=str(extract_target),
verify=verify,
)
return images, errors
def extract_layers_from_images(project, images):
"""
Extract all layers from the provided `images` into the `project` codebase
work directory.
Return an `errors` list of error messages that may occur during the
extraction.
"""
return extract_layers_from_images_to_base_path(
base_path=project.codebase_path,
images=images,
)
def extract_layers_from_images_to_base_path(base_path, images):
"""
Extract all layers from the provided `images` into the `base_path` work
directory.
Return an `errors` list of error messages that may occur during the
extraction.
"""
errors = []
base_path = Path(base_path)
for image in images:
image_dirname = Path(image.extracted_location).name
target_path = base_path / image_dirname
for layer in image.layers:
extract_target = target_path / layer.layer_id
extract_errors = extract_tar(
location=layer.archive_location,
target_dir=extract_target,
skip_symlinks=False,
as_events=False,
)
errors.extend(extract_errors)
layer.extracted_location = str(extract_target)
return errors
def get_image_data(image, layer_path_segments=2):
"""
Return a mapping of image-related data given an `image`.
Keep only ``layer_path_segments`` trailing layer location segments (or keep
the locations unmodified if ``layer_path_segments`` is 0).
"""
exclude_from_img = ["extracted_location", "archive_location"]
image_data = {
key: value
for key, value in image.to_dict(layer_path_segments=layer_path_segments).items()
if key not in exclude_from_img
}
return image_data
def get_layer_tag(image_id, layer_id, layer_index, id_length=6):
"""
Return a "tag" crafted from the provided `image_id`, `layer_id`, and `layer_index`.
The purpose of this tag is to be short, clear and sortable.
For instance, given an image with an id:
785df58b6b3e120f59bce6cd10169a0c58b8837b24f382e27593e2eea011a0d8
and two layers from bottom to top as:
0690c89adf3e8c306d4ced085fc16d1d104dcfddd6dc637e141fa78be242a707
7a1d89d2653e8e4aa9011fd95034a4857109d6636f2ad32df470a196e5dd1585
we would get these two tags:
img-785df5-layer-01-0690c8
img-785df5-layer-02-7a1d89
"""
short_image_id = image_id[:id_length]
short_layer_id = layer_id[:id_length]
return f"img-{short_image_id}-layer-{layer_index:02}-{short_layer_id}"
def create_codebase_resources(project, image):
"""Create the CodebaseResource for an `image` in a `project`."""
for layer_index, layer in enumerate(image.layers, start=1):
layer_tag = get_layer_tag(image.image_id, layer.layer_id, layer_index)
for resource in layer.get_resources(with_dir=True):
pipes.make_codebase_resource(
project=project,
location=resource.location,
rootfs_path=resource.path,
tag=layer_tag,
)
def _create_system_package(project, purl, package, layer):
"""Create system package and related resources."""
created_package = pipes.update_or_create_package(project, package.to_dict())
installed_files = []
if hasattr(package, "resources"):
installed_files = package.resources
# We have no files for this installed package, we cannot go further.
if not installed_files:
logger.info(f" No installed_files for: {purl}")
return
missing_resources = created_package.missing_resources[:]
modified_resources = created_package.modified_resources[:]
codebase_resources = project.codebaseresources.all()
for install_file in installed_files:
install_file_path = install_file.get_path(strip_root=True)
install_file_path = pipes.normalize_path(install_file_path)
layer_rootfs_path = posixpath.join(
layer.layer_id,
install_file_path.strip("/"),
)
logger.info(f" installed file rootfs_path: {install_file_path}")
logger.info(f" layer rootfs_path: {layer_rootfs_path}")
resource_qs = codebase_resources.filter(
path__endswith=layer_rootfs_path,
rootfs_path=install_file_path,
)
found_resource = False
for resource in resource_qs:
found_resource = True
if created_package not in resource.discovered_packages.all():
resource.discovered_packages.add(created_package)
resource.update(status=flag.SYSTEM_PACKAGE)
logger.info(f" added as system-package to: {purl}")
if rootfs.has_hash_diff(install_file, resource):
if install_file.path not in modified_resources:
modified_resources.append(install_file.path)
if not found_resource and install_file_path not in missing_resources:
missing_resources.append(install_file_path)
logger.info(f" installed file is missing: {install_file_path}")
created_package.update(
missing_resources=missing_resources,
modified_resources=modified_resources,
)
def scan_image_for_system_packages(project, image):
"""
Given a `project` and an `image` - this scans the `image` layer by layer for
installed system packages and creates a DiscoveredPackage for each.
Then for each installed DiscoveredPackage file, check if it exists
as a CodebaseResource. If exists, relate that CodebaseResource to its
DiscoveredPackage; otherwise, keep that as a missing file.
"""
if not image.distro:
raise rootfs.DistroNotFound("Distro not found.")
distro_id = image.distro.identifier
if distro_id not in rootfs.SUPPORTED_DISTROS:
raise rootfs.DistroNotSupported(f'Distro "{distro_id}" is not supported.')
installed_packages = image.get_installed_packages(rootfs.package_getter)
for index, (purl, package, layer) in enumerate(installed_packages):
logger.info(f"Creating package #{index}: {purl}")
_create_system_package(project, purl, package, layer)
def flag_whiteout_codebase_resources(project):
"""
Tag overlayfs/AUFS whiteout special files CodebaseResource as "ignored-whiteout".
See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
for details.
"""
whiteout_prefix = ".wh."
qs = project.codebaseresources.no_status()
qs.filter(name__startswith=whiteout_prefix).update(status=flag.IGNORED_WHITEOUT)
layer_fields = [
"layer_tag",
"created_by",
"layer_id",
"image_id",
"created",
"size",
"author",
"comment",
"archive_location",
]
Layer = namedtuple("Layer", layer_fields)
def get_layers_data(project):
"""Get list of structured layers data from project extra_data field."""
layers_data = []
images = project.extra_data.get("images", [])
if not isinstance(images, list):
return []
for image in images:
image_id = image.get("image_id")
for layer_index, layer in enumerate(image.get("layers", []), start=1):
layer_id = layer.get("layer_id")
layers_data.append(
Layer(
layer_tag=get_layer_tag(image_id, layer_id, layer_index),
created_by=layer.get("created_by"),
layer_id=layer_id,
image_id=image_id,
created=layer.get("created"),
size=layer.get("size"),
author=layer.get("author"),
comment=layer.get("comment"),
archive_location=layer.get("archive_location"),
)
)
return layers_data
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/docker.py
| 0.778102 | 0.319679 |
docker.py
|
pypi
|
import hashlib
import json
from contextlib import suppress
from pathlib import Path
from django.core.exceptions import MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist
from scanpipe import pipes
from scanpipe.models import CodebaseResource
from scanpipe.pipes import flag
from scanpipe.pipes import get_text_str_diff_ratio
from scanpipe.pipes import pathmap
# `PROSPECTIVE_JAVASCRIPT_MAP` maps transformed JS file to a dict
# that specifies extension of related files. The `related` key in
# each dict specifies the file extension of the related transformed file, and
# the `sources` key specifies the list of possible source extension.
PROSPECTIVE_JAVASCRIPT_MAP = {
".scss.js.map": {
"related": [".scss.js", ".css", ".css.map", "_rtl.css"],
"sources": [".scss"],
},
".js.map": {
"related": [".js", ".jsx", ".ts"],
"sources": [".jsx", ".ts", ".js"],
},
".soy.js.map": {
"related": [".soy.js", ".soy"],
"sources": [".soy"],
},
".css.map": {
"related": [".css"],
"sources": [".css"],
},
".ts": {
"related": [],
"sources": [".d.ts"],
},
}
def is_source_mapping_in_minified(resource, map_file_name):
"""Return True if a string contains a source mapping in its last 5 lines."""
source_mapping = f"sourceMappingURL={map_file_name}"
lines = resource.file_content.split("\n")
total_lines = len(lines)
# Get the last 5 lines.
tail = 5 if total_lines > 5 else total_lines
return any(source_mapping in line for line in reversed(lines[-tail:]))
def sha1(content):
"""Calculate the SHA-1 hash of a string."""
# The following hash is not used in any security context. It is only used
# to generate a value for matching purposes, collisions are acceptable and
# "content" is not coming from user-generated input.
return hashlib.sha1(content.encode()).hexdigest() # nosec
def source_content_sha1_list(map_file):
"""Return list containing sha1 of sourcesContent."""
contents = get_map_sources_content(map_file)
return [sha1(content) for content in contents if content]
def load_json_from_file(location):
"""Return the deserialized json content from ``location``."""
with open(location) as f:
try:
return json.load(f)
except json.JSONDecodeError:
return
def get_map_sources(map_file):
"""Return source paths from a map file."""
if data := load_json_from_file(map_file.location):
sources = data.get("sources", [])
sources = [
source.rsplit("../", 1)[-1]
for source in sources
if source and not source.startswith("webpack:///")
]
return [source for source in sources if len(Path(source).parts) > 1]
return []
def get_map_sources_content(map_file):
"""Return sources contents from a map file."""
if data := load_json_from_file(map_file.location):
return data.get("sourcesContent", [])
return []
def get_matches_by_sha1(to_map, from_resources):
content_sha1_list = source_content_sha1_list(to_map)
sources = get_map_sources(to_map)
all_source_path_available = len(sources) == len(content_sha1_list)
if not all_source_path_available:
sha1_matches = from_resources.filter(sha1__in=content_sha1_list)
# Only create relations when the number of sha1 matches if inferior or equal
# to the number of sourcesContent in map.
if len(sha1_matches) > len(content_sha1_list):
return
return [(match, {}) for match in sha1_matches]
matches = []
for sha1, source_path in zip(content_sha1_list, sources):
try:
match = from_resources.get(sha1=sha1, path__endswith=source_path)
except (MultipleObjectsReturned, ObjectDoesNotExist):
match = None
if match:
matches.append((match, {}))
return matches
def get_matches_by_ratio(
to_map, from_resources_index, from_resources, diff_ratio_threshold=0.98
):
sources = get_map_sources(to_map)
sources_content = get_map_sources_content(to_map)
matches = []
for source, content in zip(sources, sources_content):
prospect = pathmap.find_paths(source, from_resources_index)
if not prospect:
continue
# Only create relations when the number of matches is inferior or equal to
# the current number of path segment matched.
too_many_prospects = len(prospect.resource_ids) > prospect.matched_path_length
if too_many_prospects:
continue
match = None
too_many_match = False
for resource_id in prospect.resource_ids:
from_source = from_resources.get(id=resource_id)
diff_ratio = get_text_str_diff_ratio(content, from_source.file_content)
if not diff_ratio or diff_ratio < diff_ratio_threshold:
continue
if match:
too_many_match = True
break
match = (from_source, {"diff_ratio": f"{diff_ratio:.1%}"})
# For a given pair of source path and source content there should be
# one and only one from resource.
if not too_many_match and match:
matches.append(match)
return matches
def get_minified_resource(map_resource, minified_resources):
"""
Return the corresponding minified_resource given a ``map_resource`` Resource
object and a ``minified_resources`` query set of minified JS Resource.
Return None if it cannot be found.
"""
path = Path(map_resource.path.lstrip("/"))
minified_file, _ = path.name.split(".map")
minified_file_path = path.parent / minified_file
minified_resource = minified_resources.get_or_none(path=minified_file_path)
if not minified_resource:
return
if is_source_mapping_in_minified(minified_resource, path.name):
return minified_resource
_js_extensions = (
".scss.js.map",
".soy.js.map",
".css.map",
".js.map",
".scss.js",
".soy.js",
".d.ts",
".scss",
".soy",
".css",
".jsx",
".js",
".ts",
)
def get_js_map_basename_and_extension(filename):
"""
Return a 2-tuple pf (basename, extension) of a JavaScript/TypeScript related
file. Return None otherwise.
"""
# The order of extensions in the list matters since
# `.d.ts` should be tested first before `.ts`.
if not filename.endswith(_js_extensions):
return
for ext in _js_extensions:
if filename.endswith(ext):
basename = filename[: -len(ext)]
return basename, ext
def map_related_files(to_resources, to_resource, from_resource, map_type, extra_data):
if not from_resource:
return 0
path = Path(to_resource.path.lstrip("/"))
basename_and_extension = get_js_map_basename_and_extension(path.name)
basename, extension = basename_and_extension
base_path = path.parent / basename
prospect = PROSPECTIVE_JAVASCRIPT_MAP.get(extension, {})
transpiled = [to_resource]
for related_ext in prospect.get("related", []):
with suppress(CodebaseResource.DoesNotExist):
transpiled.append(to_resources.get(path=f"{base_path}{related_ext}"))
for match in transpiled:
pipes.make_relation(
from_resource=from_resource,
to_resource=match,
map_type=map_type,
extra_data=extra_data,
)
match.update(status=flag.MAPPED)
return len(transpiled)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/js.py
| 0.761272 | 0.31237 |
js.py
|
pypi
|
import shutil
from pathlib import Path
from django.core.exceptions import FieldDoesNotExist
from django.core.validators import EMPTY_VALUES
from django.db import models
import openpyxl
from scanpipe import pipes
from scanpipe.models import CodebaseRelation
from scanpipe.models import CodebaseResource
from scanpipe.models import DiscoveredDependency
from scanpipe.models import DiscoveredPackage
from scanpipe.pipes import scancode
from scanpipe.pipes.output import mappings_key_by_fieldname
def copy_input(input_location, dest_path):
"""Copy the ``input_location`` to the ``dest_path``."""
destination = dest_path / Path(input_location).name
return shutil.copyfile(input_location, destination)
def copy_inputs(input_locations, dest_path):
"""Copy the provided ``input_locations`` to the ``dest_path``."""
for input_location in input_locations:
copy_input(input_location, dest_path)
def move_inputs(inputs, dest_path):
"""Move the provided ``inputs`` to the ``dest_path``."""
for input_location in inputs:
destination = dest_path / Path(input_location).name
shutil.move(input_location, destination)
def get_tool_name_from_scan_headers(scan_data):
"""Return the ``tool_name`` of the first header in the provided ``scan_data``."""
if headers := scan_data.get("headers", []):
first_header = headers[0]
tool_name = first_header.get("tool_name", "")
return tool_name
def load_inventory_from_toolkit_scan(project, input_location):
"""
Create packages, dependencies, and resources loaded from the ScanCode-toolkit scan
results located at ``input_location``.
"""
scanned_codebase = scancode.get_virtual_codebase(project, input_location)
scancode.create_discovered_packages(project, scanned_codebase)
scancode.create_codebase_resources(project, scanned_codebase)
scancode.create_discovered_dependencies(
project, scanned_codebase, strip_datafile_path_root=True
)
def load_inventory_from_scanpipe(project, scan_data):
"""
Create packages, dependencies, resources, and relations loaded from a ScanCode.io
JSON output provided as ``scan_data``.
"""
for package_data in scan_data.get("packages", []):
pipes.update_or_create_package(project, package_data)
for resource_data in scan_data.get("files", []):
pipes.update_or_create_resource(project, resource_data)
for dependency_data in scan_data.get("dependencies", []):
pipes.update_or_create_dependency(project, dependency_data)
for relation_data in scan_data.get("relations", []):
pipes.get_or_create_relation(project, relation_data)
model_to_object_maker_func = {
DiscoveredPackage: pipes.update_or_create_package,
DiscoveredDependency: pipes.update_or_create_dependency,
CodebaseResource: pipes.update_or_create_resource,
CodebaseRelation: pipes.get_or_create_relation,
}
worksheet_name_to_model = {
"PACKAGES": DiscoveredPackage,
"RESOURCES": CodebaseResource,
"DEPENDENCIES": DiscoveredDependency,
"RELATIONS": CodebaseRelation,
}
def get_worksheet_data(worksheet):
"""Return the data from provided ``worksheet`` as a list of dict."""
try:
header = [cell.value for cell in next(worksheet.rows)]
except StopIteration:
return {}
worksheet_data = [
dict(zip(header, row))
for row in worksheet.iter_rows(min_row=2, values_only=True)
]
return worksheet_data
def clean_xlsx_field_value(model_class, field_name, value):
"""Clean the ``value`` for compatibility with the database ``model_class``."""
if value in EMPTY_VALUES:
return
if field_name == "for_packages":
return value.splitlines()
elif field_name in ["purl", "for_package_uid", "datafile_path"]:
return value
try:
field = model_class._meta.get_field(field_name)
except FieldDoesNotExist:
return
if dict_key := mappings_key_by_fieldname.get(field_name):
return [{dict_key: entry} for entry in value.splitlines()]
elif isinstance(field, models.JSONField):
if field.default == list:
return value.splitlines()
elif field.default == dict:
return # dict stored as JSON are not supported
return value
def clean_xlsx_data_to_model_data(model_class, xlsx_data):
"""Clean the ``xlsx_data`` for compatibility with the database ``model_class``."""
cleaned_data = {}
for field_name, value in xlsx_data.items():
if cleaned_value := clean_xlsx_field_value(model_class, field_name, value):
cleaned_data[field_name] = cleaned_value
return cleaned_data
def load_inventory_from_xlsx(project, input_location):
"""
Create packages, dependencies, resources, and relations loaded from XLSX file
located at ``input_location``.
"""
workbook = openpyxl.load_workbook(input_location, read_only=True, data_only=True)
for worksheet_name, model_class in worksheet_name_to_model.items():
if worksheet_name not in workbook:
continue
worksheet_data = get_worksheet_data(worksheet=workbook[worksheet_name])
for row_data in worksheet_data:
object_maker_func = model_to_object_maker_func.get(model_class)
cleaned_data = clean_xlsx_data_to_model_data(model_class, row_data)
if cleaned_data:
object_maker_func(project, cleaned_data)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/input.py
| 0.754192 | 0.215041 |
input.py
|
pypi
|
import fnmatch
import logging
import os
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
import attr
from commoncode.ignore import default_ignores
from container_inspector.distro import Distro
from packagedcode import plugin_package
from scanpipe import pipes
from scanpipe.pipes import flag
logger = logging.getLogger(__name__)
SUPPORTED_DISTROS = [
"alpine",
"debian",
"ubuntu",
"rhel",
"centos",
"fedora",
"sles",
"opensuse",
"opensuse-tumbleweed",
"photon",
"windows",
"rocky",
]
class DistroNotFound(Exception):
pass
class DistroNotSupported(Exception):
pass
@attr.attributes
class Resource:
rootfs_path = attr.attrib(
default=None,
metadata=dict(doc="The rootfs root-relative path for this Resource."),
)
location = attr.attrib(
default=None, metadata=dict(doc="The absolute location for this Resource.")
)
@attr.attributes
class RootFs:
"""A root filesystem."""
location = attr.attrib(
metadata=dict(doc="The root directory location where this rootfs lives.")
)
distro = attr.attrib(
default=None, metadata=dict(doc="The Distro object for this rootfs.")
)
def __attrs_post_init__(self, *args, **kwargs):
self.distro = Distro.from_rootfs(self.location)
@classmethod
def from_project_codebase(cls, project):
"""
Return RootFs objects collected from the project's "codebase" directory.
Each directory in the input/ is considered as the root of a root filesystem.
"""
subdirs = [path for path in project.codebase_path.glob("*/") if path.is_dir()]
for subdir in subdirs:
rootfs_location = str(subdir.absolute())
yield RootFs(location=rootfs_location)
def get_resources(self, with_dir=False):
"""Return a Resource for each file in this rootfs."""
return get_resources(location=self.location, with_dir=with_dir)
def get_installed_packages(self, packages_getter):
"""
Return tuples of (package_url, package) for installed packages found in
this rootfs layer using the `packages_getter` function or callable.
The `packages_getter()` function should:
- Accept a first argument string that is the root directory of
filesystem of this rootfs
- Return tuples of (package_url, package) where package_url is a
package_url string that uniquely identifies a package; while, a `package`
is an object that represents a package (typically a scancode-
toolkit packagedcode.models.Package class or some nested mapping with
the same structure).
The `packages_getter` function would typically query the system packages
database, such as an RPM database or similar, to collect the list of
installed system packages.
"""
return packages_getter(self.location)
def get_resources(location, with_dir=False):
"""Return the Resource found in the `location` in root directory of a rootfs."""
def get_res(parent, fname):
loc = os.path.join(parent, fname)
rootfs_path = pipes.normalize_path(loc.replace(location, ""))
return Resource(
location=loc,
rootfs_path=rootfs_path,
)
for top, dirs, files in os.walk(location):
for f in files:
yield get_res(parent=top, fname=f)
if with_dir:
for d in dirs:
yield get_res(parent=top, fname=d)
def create_codebase_resources(project, rootfs):
"""Create the CodebaseResource for a `rootfs` in `project`."""
for resource in rootfs.get_resources(with_dir=True):
pipes.make_codebase_resource(
project=project,
location=resource.location,
rootfs_path=resource.rootfs_path,
)
def has_hash_diff(install_file, codebase_resource):
"""
Return True if one of available hashes on both `install_file` and
`codebase_resource`, by hash type, is different.
For example: Alpine uses SHA1 while Debian uses MD5, we prefer the strongest hash
that's present.
"""
hash_types = ["sha512", "sha256", "sha1", "md5"]
for hash_type in hash_types:
# Find a suitable hash type that is present on both install_file and
# codebase_resource, skip otherwise.
share_hash_type = all(
[hasattr(install_file, hash_type), hasattr(codebase_resource, hash_type)]
)
if not share_hash_type:
continue
install_file_sum = getattr(install_file, hash_type)
codebase_resource_sum = getattr(codebase_resource, hash_type)
hashes_differ = all(
[
install_file_sum,
codebase_resource_sum,
install_file_sum != codebase_resource_sum,
]
)
if hashes_differ:
return True
return False
def package_getter(root_dir, **kwargs):
"""Return installed package objects."""
packages = plugin_package.get_installed_packages(root_dir)
for package in packages:
yield package.purl, package
def _create_system_package(project, purl, package):
"""Create system package and related resources."""
created_package = pipes.update_or_create_package(project, package.to_dict())
installed_files = []
if hasattr(package, "resources"):
installed_files = package.resources
# We have no files for this installed package, we cannot go further.
if not installed_files:
logger.info(f" No installed_files for: {purl}")
return
missing_resources = created_package.missing_resources[:]
modified_resources = created_package.modified_resources[:]
codebase_resources = project.codebaseresources.all()
for install_file in installed_files:
install_file_path = install_file.get_path(strip_root=True)
rootfs_path = pipes.normalize_path(install_file_path)
logger.info(f" installed file rootfs_path: {rootfs_path}")
try:
codebase_resource = codebase_resources.get(
rootfs_path=rootfs_path,
)
except ObjectDoesNotExist:
if rootfs_path not in missing_resources:
missing_resources.append(rootfs_path)
logger.info(f" installed file is missing: {rootfs_path}")
continue
if created_package not in codebase_resource.discovered_packages.all():
codebase_resource.discovered_packages.add(created_package)
codebase_resource.update(status=flag.SYSTEM_PACKAGE)
logger.info(f" added as system-package to: {purl}")
if has_hash_diff(install_file, codebase_resource):
if install_file.path not in modified_resources:
modified_resources.append(install_file.path)
created_package.update(
missing_resources=missing_resources,
modified_resources=modified_resources,
)
def scan_rootfs_for_system_packages(project, rootfs):
"""
Given a `project` Project and a `rootfs` RootFs, scan the `rootfs` for
installed system packages, and create a DiscoveredPackage for each.
Then for each installed DiscoveredPackage file, check if it exists
as a CodebaseResource. If exists, relate that CodebaseResource to its
DiscoveredPackage; otherwise, keep that as a missing file.
"""
if not rootfs.distro:
raise DistroNotFound("Distro not found.")
distro_id = rootfs.distro.identifier
if distro_id not in SUPPORTED_DISTROS:
raise DistroNotSupported(f'Distro "{distro_id}" is not supported.')
logger.info(f"rootfs location: {rootfs.location}")
installed_packages = rootfs.get_installed_packages(package_getter)
for index, (purl, package) in enumerate(installed_packages):
logger.info(f"Creating package #{index}: {purl}")
_create_system_package(project, purl, package)
def get_resource_with_md5(project, status):
"""
Return a queryset of CodebaseResource from a `project` that has a `status`,
a non-empty size, and md5.
"""
return (
project.codebaseresources.status(status=status)
.exclude(md5__exact="")
.exclude(size__exact=0)
)
def match_not_analyzed(
project,
reference_status=flag.SYSTEM_PACKAGE,
not_analyzed_status=flag.NOT_ANALYZED,
):
"""
Given a `project` Project :
1. Build an MD5 index of files assigned to a package that has a status of
`reference_status`
2. Attempt to match resources with status `not_analyzed_status` to that
index
3. Relate each matched CodebaseResource to the matching DiscoveredPackage and
set its status.
"""
known_resources = get_resource_with_md5(project=project, status=reference_status)
known_resources_by_md5_size = {
(
r.md5,
r.size,
): r
for r in known_resources
}
count = 0
matchables = get_resource_with_md5(project=project, status=not_analyzed_status)
for matchable in matchables:
key = (matchable.md5, matchable.size)
matched = known_resources_by_md5_size.get(key)
if matched is None:
continue
count += 1
package = matched.discovered_packages.all()[0]
matchable.discovered_packages.add(package)
matchable.update(status=reference_status)
def flag_uninteresting_codebase_resources(project):
"""
Flag any file that do not belong to any system package and determine if it's:
- A temp file
- Generated
- Log file of sorts (such as var) using few heuristics
"""
uninteresting_and_transient = (
"/tmp/", # nosec
"/etc/",
"/proc/",
"/dev/",
"/run/",
"/lib/apk/db/", # alpine specific
)
lookups = Q()
for segment in uninteresting_and_transient:
lookups |= Q(rootfs_path__startswith=segment)
qs = project.codebaseresources.no_status()
qs.filter(lookups).update(status=flag.IGNORED_NOT_INTERESTING)
def flag_ignorable_codebase_resources(project):
"""
Flag codebase resource using the glob patterns from commoncode.ignore of
ignorable files/directories, if their paths match an ignorable pattern.
"""
lookups = Q()
for pattern in default_ignores.keys():
# Translate glob pattern to regex
translated_pattern = fnmatch.translate(pattern)
# PostgreSQL does not like parts of Python regex
if translated_pattern.startswith("(?s"):
translated_pattern = translated_pattern.replace("(?s", "(?")
lookups |= Q(rootfs_path__icontains=pattern)
lookups |= Q(rootfs_path__iregex=translated_pattern)
qs = project.codebaseresources.no_status()
qs.filter(lookups).update(status=flag.IGNORED_DEFAULT_IGNORES)
def flag_data_files_with_no_clues(project):
"""
Flag CodebaseResources that have a file type of `data` and no detected clues
to be uninteresting.
"""
lookup = Q(
file_type="data",
copyrights=[],
holders=[],
authors=[],
license_detections=[],
detected_license_expression="",
emails=[],
urls=[],
)
qs = project.codebaseresources
qs.filter(lookup).update(status=flag.IGNORED_DATA_FILE_NO_CLUES)
def flag_media_files_as_uninteresting(project):
"""Flag CodebaseResources that are media files to be uninteresting."""
qs = project.codebaseresources.no_status()
qs.filter(is_media=True).update(status=flag.IGNORED_MEDIA_FILE)
def get_rootfs_data(root_fs):
"""Return a mapping of rootfs-related data given a ``root_fs``."""
return {
"name": os.path.basename(root_fs.location),
"distro": root_fs.distro.to_dict() if root_fs.distro else {},
}
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/rootfs.py
| 0.730001 | 0.173498 |
rootfs.py
|
pypi
|
import json
import sys
from pathlib import Path
from django.core.validators import EMPTY_VALUES
from attributecode.model import About
from packagedcode import APPLICATION_PACKAGE_DATAFILE_HANDLERS
from packagedcode.licensing import get_license_detections_and_expression
from packageurl import PackageURL
from python_inspector.resolve_cli import resolver_api
from scancode.api import get_package_data
from scanpipe.models import DiscoveredPackage
from scanpipe.pipes import cyclonedx
from scanpipe.pipes import spdx
"""
Resolve packages from manifest, lockfile, and SBOM.
"""
def resolve_packages(input_location):
"""Resolve the packages from manifest file."""
default_package_type = get_default_package_type(input_location)
if not default_package_type:
raise Exception(f"No package type found for {input_location}")
# The ScanCode.io resolvers take precedence over the ScanCode-toolkit ones.
resolver = resolver_registry.get(default_package_type)
if resolver:
resolved_packages = resolver(input_location=input_location)
else:
package_data = get_package_data(location=input_location)
resolved_packages = package_data.get("package_data", [])
return resolved_packages
def resolve_pypi_packages(input_location):
"""Resolve the PyPI packages from the `input_location` requirements file."""
python_version = f"{sys.version_info.major}{sys.version_info.minor}"
operating_system = "linux"
inspector_output = resolver_api(
requirement_files=[input_location],
python_version=python_version,
operating_system=operating_system,
prefer_source=True,
)
return inspector_output.packages
def resolve_about_package(input_location):
"""Resolve the package from the ``input_location`` .ABOUT file."""
about = About(location=input_location)
about_data = about.as_dict()
package_data = about_data.copy()
if package_url := about_data.get("package_url"):
package_url_data = PackageURL.from_string(package_url).to_dict(encode=True)
for field_name, value in package_url_data.items():
if value:
package_data[field_name] = value
if about_resource := about_data.get("about_resource"):
package_data["filename"] = list(about_resource.keys())[0]
if ignored_resources := about_data.get("ignored_resources"):
extra_data = {"ignored_resources": list(ignored_resources.keys())}
package_data["extra_data"] = extra_data
if license_expression := about_data.get("license_expression"):
package_data["declared_license_expression"] = license_expression
if notice_dict := about_data.get("notice_file"):
package_data["notice_text"] = list(notice_dict.values())[0]
for field_name, value in about_data.items():
if field_name.startswith("checksum_"):
package_data[field_name.replace("checksum_", "")] = value
package_data = DiscoveredPackage.clean_data(package_data)
return package_data
def resolve_about_packages(input_location):
"""
Wrap ``resolve_about_package`` to return a list as expected by the
InspectManifest pipeline.
"""
return [resolve_about_package(input_location)]
def convert_spdx_expression(license_expression_spdx):
"""
Return an ScanCode license expression from a SPDX `license_expression_spdx`
string.
"""
return get_license_detections_and_expression(license_expression_spdx)[1]
def spdx_package_to_discovered_package_data(spdx_package):
package_url_dict = {}
for ref in spdx_package.external_refs:
if ref.type == "purl":
purl = ref.locator
package_url_dict = PackageURL.from_string(purl).to_dict(encode=True)
checksum_data = {
checksum.algorithm.lower(): checksum.value
for checksum in spdx_package.checksums
}
declared_license_expression_spdx = spdx_package.license_concluded
declared_expression = ""
if declared_license_expression_spdx:
declared_expression = convert_spdx_expression(declared_license_expression_spdx)
package_data = {
"name": spdx_package.name,
"download_url": spdx_package.download_location,
"declared_license_expression": declared_expression,
"declared_license_expression_spdx": declared_license_expression_spdx,
"extracted_license_statement": spdx_package.license_declared,
"copyright": spdx_package.copyright_text,
"version": spdx_package.version,
"homepage_url": spdx_package.homepage,
"filename": spdx_package.filename,
"description": spdx_package.description,
"release_date": spdx_package.release_date,
**package_url_dict,
**checksum_data,
}
return {
key: value
for key, value in package_data.items()
if value not in [None, "", "NOASSERTION"]
}
def resolve_spdx_packages(input_location):
"""Resolve the packages from the `input_location` SPDX document file."""
input_path = Path(input_location)
spdx_document = json.loads(input_path.read_text())
try:
spdx.validate_document(spdx_document)
except Exception as e:
raise Exception(f'SPDX document "{input_path.name}" is not valid: {e}')
return [
spdx_package_to_discovered_package_data(spdx.Package.from_data(spdx_package))
for spdx_package in spdx_document.get("packages", [])
]
def cyclonedx_component_to_package_data(component_data):
"""Return package_data from CycloneDX component."""
extra_data = {}
component = component_data["cdx_package"]
package_url_dict = {}
if component.purl:
package_url_dict = PackageURL.from_string(component.purl).to_dict(encode=True)
declared_license = cyclonedx.get_declared_licenses(licenses=component.licenses)
if external_references := cyclonedx.get_external_references(component):
extra_data["externalReferences"] = external_references
if nested_components := component_data.get("nested_components"):
extra_data["nestedComponents"] = nested_components
package_data = {
"name": component.name,
"extracted_license_statement": declared_license,
"copyright": component.copyright,
"version": component.version,
"description": component.description,
"extra_data": extra_data,
**package_url_dict,
**cyclonedx.get_checksums(component),
**cyclonedx.get_properties_data(component),
}
return {
key: value for key, value in package_data.items() if value not in EMPTY_VALUES
}
def resolve_cyclonedx_packages(input_location):
"""Resolve the packages from the `input_location` CycloneDX document file."""
input_path = Path(input_location)
cyclonedx_document = json.loads(input_path.read_text())
try:
cyclonedx.validate_document(cyclonedx_document)
except Exception as e:
raise Exception(f'CycloneDX document "{input_path.name}" is not valid: {e}')
cyclonedx_bom = cyclonedx.get_bom(cyclonedx_document)
components = cyclonedx.get_components(cyclonedx_bom)
return [cyclonedx_component_to_package_data(component) for component in components]
def get_default_package_type(input_location):
"""
Return the package type associated with the provided `input_location`.
This type is used to get the related handler that knows how process the input.
"""
input_location = str(input_location)
for handler in APPLICATION_PACKAGE_DATAFILE_HANDLERS:
if handler.is_datafile(input_location):
return handler.default_package_type
if input_location.endswith((".spdx", ".spdx.json")):
return "spdx"
if input_location.endswith((".bom.json", ".cdx.json")):
return "cyclonedx"
if input_location.endswith(".json"):
if cyclonedx.is_cyclonedx_bom(input_location):
return "cyclonedx"
if spdx.is_spdx_document(input_location):
return "spdx"
# Mapping between the `default_package_type` its related resolver function
resolver_registry = {
"about": resolve_about_packages,
"pypi": resolve_pypi_packages,
"spdx": resolve_spdx_packages,
"cyclonedx": resolve_cyclonedx_packages,
}
def set_license_expression(package_data):
"""
Set the license expression from a detected license dict/str in provided
`package_data`.
"""
extracted_license_statement = package_data.get("extracted_license_statement")
declared_license_expression = package_data.get("declared_license_expression")
if extracted_license_statement and not declared_license_expression:
_, license_expression = get_license_detections_and_expression(
extracted_license_statement
)
if license_expression:
package_data["declared_license_expression"] = license_expression
return package_data
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipes/resolve.py
| 0.619817 | 0.194024 |
resolve.py
|
pypi
|
from scanpipe.pipelines import Pipeline
from scanpipe.pipes import d2d
from scanpipe.pipes import flag
from scanpipe.pipes import matchcode
from scanpipe.pipes import purldb
from scanpipe.pipes import scancode
class DeployToDevelop(Pipeline):
"""
Relate deploy and develop code trees.
This pipeline is expecting 2 archive files with "from-" and "to-" filename
prefixes as inputs:
- "from-[FILENAME]" archive containing the development source code
- "to-[FILENAME]" archive containing the deployment compiled code
"""
@classmethod
def steps(cls):
return (
cls.get_inputs,
cls.extract_inputs_to_codebase_directory,
cls.extract_archives_in_place,
cls.collect_and_create_codebase_resources,
cls.fingerprint_codebase_directories,
cls.flag_empty_files,
cls.flag_ignored_resources,
cls.map_about_files,
cls.map_checksum,
cls.find_java_packages,
cls.map_java_to_class,
cls.map_jar_to_source,
cls.map_javascript,
cls.match_purldb,
cls.map_javascript_post_purldb_match,
cls.map_javascript_path,
cls.map_javascript_colocation,
cls.map_thirdparty_npm_packages,
cls.map_path,
cls.flag_mapped_resources_archives_and_ignored_directories,
cls.scan_mapped_from_for_files,
)
purldb_package_extensions = [".jar", ".war", ".zip"]
purldb_resource_extensions = [
".map",
".js",
".mjs",
".ts",
".d.ts",
".jsx",
".tsx",
".css",
".scss",
".less",
".sass",
".soy",
".class",
]
def get_inputs(self):
"""Locate the ``from`` and ``to`` input files."""
self.from_files, self.to_files = d2d.get_inputs(self.project)
def extract_inputs_to_codebase_directory(self):
"""Extract input files to the project's codebase/ directory."""
inputs_with_codebase_path_destination = [
(self.from_files, self.project.codebase_path / d2d.FROM),
(self.to_files, self.project.codebase_path / d2d.TO),
]
errors = []
for input_files, codebase_path in inputs_with_codebase_path_destination:
for input_file_path in input_files:
errors += scancode.extract_archive(input_file_path, codebase_path)
if errors:
self.add_error("\n".join(errors))
def extract_archives_in_place(self):
"""Extract recursively from* and to* archives in place with extractcode."""
extract_errors = scancode.extract_archives(
self.project.codebase_path,
recurse=self.env.get("extract_recursively", True),
)
if extract_errors:
self.add_error("\n".join(extract_errors))
def collect_and_create_codebase_resources(self):
"""Collect and create codebase resources."""
d2d.collect_and_create_codebase_resources(self.project)
def fingerprint_codebase_directories(self):
"""Compute directory fingerprints for matching"""
matchcode.fingerprint_codebase_directories(self.project, to_codebase_only=True)
def map_about_files(self):
"""Map ``from/`` .ABOUT files to their related ``to/`` resources."""
d2d.map_about_files(project=self.project, logger=self.log)
def map_checksum(self):
"""Map using SHA1 checksum."""
d2d.map_checksum(project=self.project, checksum_field="sha1", logger=self.log)
def find_java_packages(self):
"""Find the java package of the .java source files."""
d2d.find_java_packages(self.project, logger=self.log)
def map_java_to_class(self):
"""Map a .class compiled file to its .java source."""
d2d.map_java_to_class(project=self.project, logger=self.log)
def map_jar_to_source(self):
"""Map .jar files to their related source directory."""
d2d.map_jar_to_source(project=self.project, logger=self.log)
def map_javascript(self):
"""
Map a packed or minified JavaScript, TypeScript, CSS and SCSS
to its source.
"""
d2d.map_javascript(project=self.project, logger=self.log)
def match_purldb(self):
"""Match selected files by extension and directories in PurlDB."""
if not purldb.is_available():
self.log("PurlDB is not available. Skipping.")
return
d2d.match_purldb_resources(
project=self.project,
extensions=self.purldb_package_extensions,
matcher_func=d2d.match_purldb_package,
logger=self.log,
)
d2d.match_purldb_directories(
project=self.project,
logger=self.log,
)
d2d.match_purldb_resources(
project=self.project,
extensions=self.purldb_resource_extensions,
matcher_func=d2d.match_purldb_resource,
logger=self.log,
)
def map_javascript_post_purldb_match(self):
"""Map minified javascript file based on existing PurlDB match."""
d2d.map_javascript_post_purldb_match(project=self.project, logger=self.log)
def map_javascript_path(self):
"""Map javascript file based on path."""
d2d.map_javascript_path(project=self.project, logger=self.log)
def map_javascript_colocation(self):
"""Map JavaScript files based on neighborhood file mapping."""
d2d.map_javascript_colocation(project=self.project, logger=self.log)
def map_thirdparty_npm_packages(self):
"""Map thirdparty package using package.json metadata."""
d2d.map_thirdparty_npm_packages(project=self.project, logger=self.log)
def map_path(self):
"""Map using path similarities."""
d2d.map_path(project=self.project, logger=self.log)
def flag_mapped_resources_archives_and_ignored_directories(self):
"""Flag all codebase resources that were mapped during the pipeline."""
flag.flag_mapped_resources(self.project)
flag.flag_ignored_directories(self.project)
d2d.flag_processed_archives(self.project)
def scan_mapped_from_for_files(self):
"""Scan mapped ``from/`` files for copyrights, licenses, emails, and urls."""
scan_files = d2d.get_from_files_for_scanning(self.project.codebaseresources)
scancode.scan_for_files(self.project, scan_files)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/deploy_to_develop.py
| 0.830663 | 0.236406 |
deploy_to_develop.py
|
pypi
|
from scanpipe.pipelines import Pipeline
from scanpipe.pipes import flag
from scanpipe.pipes import rootfs
from scanpipe.pipes import scancode
class RootFS(Pipeline):
"""Analyze a Linux root filesystem, aka rootfs."""
@classmethod
def steps(cls):
return (
cls.extract_input_files_to_codebase_directory,
cls.find_root_filesystems,
cls.collect_rootfs_information,
cls.collect_and_create_codebase_resources,
cls.collect_and_create_system_packages,
cls.flag_uninteresting_codebase_resources,
cls.flag_empty_files,
cls.flag_ignored_resources,
cls.scan_for_application_packages,
cls.match_not_analyzed_to_system_packages,
cls.scan_for_files,
cls.analyze_scanned_files,
cls.flag_not_analyzed_codebase_resources,
)
def extract_input_files_to_codebase_directory(self):
"""Extract root filesystem input archives with extractcode."""
input_files = self.project.inputs("*")
target_path = self.project.codebase_path
errors = []
for input_file in input_files:
extract_target = target_path / f"{input_file.name}-extract"
extract_errors = scancode.extract_archive(input_file, extract_target)
errors.extend(extract_errors)
if errors:
self.add_error("\n".join(errors))
def find_root_filesystems(self):
"""Find root filesystems in the project's codebase/."""
self.root_filesystems = list(rootfs.RootFs.from_project_codebase(self.project))
def collect_rootfs_information(self):
"""Collect and stores rootfs information on the project."""
rootfs_data = [
rootfs.get_rootfs_data(root_fs) for root_fs in self.root_filesystems
]
self.project.update_extra_data({"root_filesystems": rootfs_data})
def collect_and_create_codebase_resources(self):
"""Collect and label all image files as CodebaseResource."""
for rfs in self.root_filesystems:
rootfs.create_codebase_resources(self.project, rfs)
def collect_and_create_system_packages(self):
"""
Collect installed system packages for each rootfs based on the distro.
The collection of system packages is only available for known distros.
"""
with self.save_errors(rootfs.DistroNotFound, rootfs.DistroNotSupported):
for rfs in self.root_filesystems:
rootfs.scan_rootfs_for_system_packages(self.project, rfs)
def flag_uninteresting_codebase_resources(self):
"""Flag files—not worth tracking—that don’t belong to any system packages."""
rootfs.flag_uninteresting_codebase_resources(self.project)
def scan_for_application_packages(self):
"""Scan unknown resources for packages information."""
scancode.scan_for_application_packages(self.project)
def match_not_analyzed_to_system_packages(self):
"""
Match files with "not-yet-analyzed" status to files already belonging to
system packages.
"""
rootfs.match_not_analyzed(
self.project,
reference_status=flag.SYSTEM_PACKAGE,
not_analyzed_status=flag.NO_STATUS,
)
def match_not_analyzed_to_application_packages(self):
"""
Match files with "not-yet-analyzed" status to files already belonging to
application packages.
"""
# TODO: do it one rootfs at a time e.g. for rfs in self.root_filesystems:
rootfs.match_not_analyzed(
self.project,
reference_status=flag.APPLICATION_PACKAGE,
not_analyzed_status=flag.NO_STATUS,
)
def scan_for_files(self):
"""Scan unknown resources for copyrights, licenses, emails, and urls."""
scancode.scan_for_files(self.project)
def analyze_scanned_files(self):
"""Analyze single file scan results for completeness."""
flag.analyze_scanned_files(self.project)
def flag_not_analyzed_codebase_resources(self):
"""Check for any leftover files for sanity; there should be none."""
flag.flag_not_analyzed_codebase_resources(self.project)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/root_filesystems.py
| 0.714329 | 0.216529 |
root_filesystems.py
|
pypi
|
import json
from django.core.serializers.json import DjangoJSONEncoder
from commoncode.hash import multi_checksums
from scanpipe.pipelines import Pipeline
from scanpipe.pipes import input
from scanpipe.pipes import scancode
from scanpipe.pipes.scancode import extract_archive
class ScanPackage(Pipeline):
"""
Scan a single package archive with ScanCode-toolkit.
The output is a summary of the scan results in JSON format.
"""
@classmethod
def steps(cls):
return (
cls.get_package_archive_input,
cls.collect_archive_information,
cls.extract_archive_to_codebase_directory,
cls.run_scancode,
cls.load_inventory_from_toolkit_scan,
cls.make_summary_from_scan_results,
)
scancode_run_scan_args = {
"copyright": True,
"email": True,
"info": True,
"license": True,
"license_text": True,
"package": True,
"url": True,
"classify": True,
"summary": True,
}
def get_package_archive_input(self):
"""Locate the input package archive in the project's input/ directory."""
input_files = self.project.input_files
inputs = list(self.project.inputs())
if len(inputs) != 1 or len(input_files) != 1:
raise Exception("Only 1 input file supported")
self.archive_path = inputs[0]
def collect_archive_information(self):
"""Collect and store information about the input archive in the project."""
self.project.update_extra_data(
{
"filename": self.archive_path.name,
"size": self.archive_path.stat().st_size,
**multi_checksums(self.archive_path),
}
)
def extract_archive_to_codebase_directory(self):
"""Extract package archive with extractcode."""
extract_errors = extract_archive(self.archive_path, self.project.codebase_path)
if extract_errors:
self.add_error("\n".join(extract_errors))
def run_scancode(self):
"""Scan extracted codebase/ content."""
scan_output_path = self.project.get_output_file_path("scancode", "json")
self.scan_output_location = str(scan_output_path.absolute())
run_scan_args = self.scancode_run_scan_args.copy()
if license_score := self.project.get_env("scancode_license_score"):
run_scan_args["license_score"] = license_score
errors = scancode.run_scan(
location=str(self.project.codebase_path),
output_file=self.scan_output_location,
run_scan_args=run_scan_args,
)
if errors:
raise scancode.ScancodeError(errors)
if not scan_output_path.exists():
raise FileNotFoundError("ScanCode output not available.")
def load_inventory_from_toolkit_scan(self):
"""Process a JSON Scan results to populate codebase resources and packages."""
input.load_inventory_from_toolkit_scan(self.project, self.scan_output_location)
def make_summary_from_scan_results(self):
"""Build a summary in JSON format from the generated scan results."""
summary = scancode.make_results_summary(self.project, self.scan_output_location)
output_file = self.project.get_output_file_path("summary", "json")
with output_file.open("w") as summary_file:
summary_file.write(json.dumps(summary, indent=2, cls=DjangoJSONEncoder))
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/scan_package.py
| 0.653901 | 0.213603 |
scan_package.py
|
pypi
|
from scanpipe.pipelines.root_filesystems import RootFS
from scanpipe.pipes import docker
from scanpipe.pipes import rootfs
class Docker(RootFS):
"""Analyze Docker images."""
@classmethod
def steps(cls):
return (
cls.extract_images,
cls.extract_layers,
cls.find_images_os_and_distro,
cls.collect_images_information,
cls.collect_and_create_codebase_resources,
cls.collect_and_create_system_packages,
cls.flag_uninteresting_codebase_resources,
cls.flag_empty_files,
cls.flag_ignored_resources,
cls.scan_for_application_packages,
cls.scan_for_files,
cls.analyze_scanned_files,
cls.flag_not_analyzed_codebase_resources,
)
def extract_images(self):
"""Extract images from input tarballs."""
self.images, errors = docker.extract_images_from_inputs(self.project)
if not self.images:
raise Exception("No images found in project input files.")
if errors:
self.add_error("\n".join(errors))
def extract_layers(self):
"""Extract layers from input images."""
errors = docker.extract_layers_from_images(self.project, self.images)
if errors:
self.add_error("\n".join(errors))
def find_images_os_and_distro(self):
"""Find the operating system and distro of input images."""
for image in self.images:
image.get_and_set_distro()
def collect_images_information(self):
"""Collect and store image information in a project."""
images_data = [docker.get_image_data(image) for image in self.images]
self.project.update_extra_data({"images": images_data})
def collect_and_create_codebase_resources(self):
"""Collect and labels all image files as CodebaseResources."""
for image in self.images:
docker.create_codebase_resources(self.project, image)
def collect_and_create_system_packages(self):
"""Collect installed system packages for each layer based on the distro."""
with self.save_errors(rootfs.DistroNotFound, rootfs.DistroNotSupported):
for image in self.images:
docker.scan_image_for_system_packages(self.project, image)
def flag_uninteresting_codebase_resources(self):
"""Flag files that don't belong to any system package."""
docker.flag_whiteout_codebase_resources(self.project)
rootfs.flag_uninteresting_codebase_resources(self.project)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/docker.py
| 0.805403 | 0.27661 |
docker.py
|
pypi
|
import inspect
import logging
import traceback
import warnings
from contextlib import contextmanager
from functools import wraps
from pydoc import getdoc
from pydoc import splitdoc
from timeit import default_timer as timer
from django.utils import timezone
from pyinstrument import Profiler
from scanpipe import humanize_time
logger = logging.getLogger(__name__)
class BasePipeline:
"""Base class for all pipelines."""
def __init__(self, run):
"""Load the Run and Project instances."""
self.run = run
self.project = run.project
self.pipeline_name = run.pipeline_name
self.env = self.project.get_env()
@classmethod
def steps(cls):
raise NotImplementedError
@classmethod
def get_steps(cls):
"""
Raise a deprecation warning when the steps are defined as a tuple instead of
a classmethod.
"""
if callable(cls.steps):
return cls.steps()
warnings.warn(
f"Defining ``steps`` as a tuple is deprecated in {cls} "
f"Use a ``steps(cls)`` classmethod instead."
)
return cls.steps
@classmethod
def get_doc(cls):
"""Get the doc string of this pipeline."""
return getdoc(cls)
@classmethod
def get_graph(cls):
"""Return a graph of steps."""
return [
{"name": step.__name__, "doc": getdoc(step)} for step in cls.get_steps()
]
@classmethod
def get_info(cls):
"""Get a dictionary of combined information data about this pipeline."""
summary, description = splitdoc(cls.get_doc())
return {
"summary": summary,
"description": description,
"steps": cls.get_graph(),
}
@classmethod
def get_summary(cls):
"""Get the doc string summary."""
return cls.get_info()["summary"]
def log(self, message):
"""Log the given `message` to the current module logger and Run instance."""
now_as_localtime = timezone.localtime(timezone.now())
timestamp = now_as_localtime.strftime("%Y-%m-%d %H:%M:%S.%f")[:-4]
message = f"{timestamp} {message}"
logger.info(message)
self.run.append_to_log(message)
def execute(self):
"""Execute each steps in the order defined on this pipeline class."""
self.log(f"Pipeline [{self.pipeline_name}] starting")
steps = self.get_steps()
steps_count = len(steps)
for current_index, step in enumerate(steps, start=1):
step_name = step.__name__
self.run.set_current_step(f"{current_index}/{steps_count} {step_name}")
self.log(f"Step [{step_name}] starting")
start_time = timer()
try:
step(self)
except Exception as e:
self.log("Pipeline failed")
tb = "".join(traceback.format_tb(e.__traceback__))
return 1, f"{e}\n\nTraceback:\n{tb}"
run_time = timer() - start_time
self.log(f"Step [{step.__name__}] completed in {humanize_time(run_time)}")
self.run.set_current_step("")
self.log("Pipeline completed")
return 0, ""
def add_error(self, exception):
"""Create a ``ProjectMessage`` ERROR record on the current `project`."""
self.project.add_error(model=self.pipeline_name, exception=exception)
@contextmanager
def save_errors(self, *exceptions):
"""
Context manager to save specified exceptions as ``ProjectMessage`` in the
database.
Example in a Pipeline step:
with self.save_errors(rootfs.DistroNotFound):
rootfs.scan_rootfs_for_system_packages(self.project, rfs)
"""
try:
yield
except exceptions as error:
self.add_error(exception=error)
class Pipeline(BasePipeline):
"""Main class for all pipelines including common step methods."""
def flag_empty_files(self):
"""Flag empty files."""
from scanpipe.pipes import flag
flag.flag_empty_files(self.project)
def flag_ignored_resources(self):
"""Flag ignored resources based on Project ``ignored_patterns`` setting."""
from scanpipe.pipes import flag
if ignored_patterns := self.env.get("ignored_patterns"):
flag.flag_ignored_patterns(self.project, patterns=ignored_patterns)
def is_pipeline(obj):
"""
Return True if the `obj` is a subclass of `Pipeline` except for the
`Pipeline` class itself.
"""
return inspect.isclass(obj) and issubclass(obj, Pipeline) and obj is not Pipeline
def profile(step):
"""
Profile a Pipeline step and save the results as HTML file in the project output
directory.
Usage:
@profile
def step(self):
pass
"""
@wraps(step)
def wrapper(*arg, **kwargs):
pipeline_instance = arg[0]
project = pipeline_instance.project
with Profiler() as profiler:
result = step(*arg, **kwargs)
output_file = project.get_output_file_path("profile", "html")
output_file.write_text(profiler.output_html())
pipeline_instance.log(f"Profiling results at {output_file.resolve()}")
return result
return wrapper
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/__init__.py
| 0.814791 | 0.240418 |
__init__.py
|
pypi
|
from scanpipe import pipes
from scanpipe.pipelines import Pipeline
from scanpipe.pipes import scancode
from scanpipe.pipes.input import copy_inputs
class ScanCodebase(Pipeline):
"""
Scan a codebase with ScanCode-toolkit.
If the codebase consists of several packages and dependencies, it will try to
resolve and scan those too.
Input files are copied to the project's codebase/ directory and are extracted
in place before running the scan.
Alternatively, the code can be manually copied to the project codebase/
directory.
"""
@classmethod
def steps(cls):
return (
cls.copy_inputs_to_codebase_directory,
cls.extract_archives,
cls.collect_and_create_codebase_resources,
cls.flag_empty_files,
cls.flag_ignored_resources,
cls.scan_for_application_packages,
cls.scan_for_files,
)
def copy_inputs_to_codebase_directory(self):
"""
Copy input files to the project's codebase/ directory.
The code can also be copied there prior to running the Pipeline.
"""
copy_inputs(self.project.inputs(), self.project.codebase_path)
def extract_archives(self):
"""Extract archives with extractcode."""
extract_errors = scancode.extract_archives(
location=self.project.codebase_path,
recurse=self.env.get("extract_recursively", True),
)
if extract_errors:
self.add_error("\n".join(extract_errors))
def collect_and_create_codebase_resources(self):
"""Collect and create codebase resources."""
for resource_path in self.project.walk_codebase_path():
pipes.make_codebase_resource(
project=self.project,
location=str(resource_path),
)
def scan_for_application_packages(self):
"""Scan unknown resources for packages information."""
scancode.scan_for_application_packages(self.project)
def scan_for_files(self):
"""Scan unknown resources for copyrights, licenses, emails, and urls."""
scancode.scan_for_files(self.project)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/scan_codebase.py
| 0.739046 | 0.228081 |
scan_codebase.py
|
pypi
|
from scanpipe.pipelines import Pipeline
from scanpipe.pipes import resolve
from scanpipe.pipes import update_or_create_package
class InspectManifest(Pipeline):
"""
Inspect one or more manifest files and resolve its packages.
Supports:
- BOM: SPDX document, CycloneDX BOM, AboutCode ABOUT file
- Python: requirements.txt, setup.py, setup.cfg, Pipfile.lock
- JavaScript: yarn.lock lockfile, npm package-lock.json lockfile
- Java: Java JAR MANIFEST.MF, Gradle build script
- Ruby: RubyGems gemspec manifest, RubyGems Bundler Gemfile.lock
- Rust: Rust Cargo.lock dependencies lockfile, Rust Cargo.toml package manifest
- PHP: PHP composer lockfile, PHP composer manifest
- NuGet: nuspec package manifest
- Dart: pubspec manifest, pubspec lockfile
- OS: FreeBSD compact package manifest, Debian installed packages database
Full list available at https://scancode-toolkit.readthedocs.io/en/
doc-update-licenses/reference/available_package_parsers.html
"""
@classmethod
def steps(cls):
return (
cls.get_manifest_inputs,
cls.get_packages_from_manifest,
cls.create_resolved_packages,
)
def get_manifest_inputs(self):
"""Locate all the manifest files from the project's input/ directory."""
self.input_locations = [
str(input.absolute()) for input in self.project.inputs()
]
def get_packages_from_manifest(self):
"""Get packages data from manifest files."""
self.resolved_packages = []
for input_location in self.input_locations:
packages = resolve.resolve_packages(input_location)
if not packages:
raise Exception(f"No packages could be resolved for {input_location}")
self.resolved_packages.extend(packages)
def create_resolved_packages(self):
"""Create the resolved packages and their dependencies in the database."""
for package_data in self.resolved_packages:
package_data = resolve.set_license_expression(package_data)
dependencies = package_data.pop("dependencies", [])
update_or_create_package(self.project, package_data)
for dependency_data in dependencies:
resolved_package = dependency_data.get("resolved_package")
if resolved_package:
resolved_package.pop("dependencies", [])
update_or_create_package(self.project, resolved_package)
|
/scancodeio-32.6.0.tar.gz/scancodeio-32.6.0/scanpipe/pipelines/inspect_manifest.py
| 0.827584 | 0.151341 |
inspect_manifest.py
|
pypi
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.