[MONAI] python code formatting
monai-bot committed May 5, 2020
1 parent 3c035fd commit 6533b86
Showing 194 changed files with 4,727 additions and 4,536 deletions.
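
The diff below is purely stylistic: single-quoted string literals become double-quoted, long literal collections are exploded onto one element per line with trailing commas, and short multi-line constructs are collapsed onto a single line. This matches the style of an autoformatter such as black, though the commit message only says "python code formatting", so the exact tool is an assumption. A representative before/after pair, abridged from docs/source/conf.py below:

    # before
    exclude_patterns = ['transforms', 'networks', 'metrics', 'engine', 'data',
                        'application', 'config', 'handlers', 'losses', 'visualize', 'utils']

    # after (black-style reflow is an assumption; the change itself appears in the diff)
    exclude_patterns = [
        "transforms",
        "networks",
        # ... remaining entries unchanged except for quoting ...
        "utils",
    ]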
107 changes: 60 additions & 47 deletions docs/source/conf.py
@@ -14,107 +14,120 @@
import sys
import subprocess

sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")))
print(sys.path)

import monai # noqa: E402

# -- Project information -----------------------------------------------------
project = 'MONAI'
copyright = '2020, MONAI Contributors'
author = 'MONAI Contributors'
project = "MONAI"
copyright = "2020, MONAI Contributors"
author = "MONAI Contributors"

# The full version, including alpha/beta/rc tags
short_version = monai.__version__.split('+')[0]
short_version = monai.__version__.split("+")[0]
release = short_version
version = short_version

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['transforms', 'networks', 'metrics', 'engine', 'data',
'application', 'config', 'handlers', 'losses', 'visualize', 'utils']
exclude_patterns = [
"transforms",
"networks",
"metrics",
"engine",
"data",
"application",
"config",
"handlers",
"losses",
"visualize",
"utils",
]


def generate_apidocs(*args):
"""Generate API docs automatically by trawling the available modules"""
module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'monai'))
output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'apidocs'))
apidoc_command_path = 'sphinx-apidoc'
if hasattr(sys, 'real_prefix'): # called from a virtualenv
apidoc_command_path = os.path.join(sys.prefix, 'bin', 'sphinx-apidoc')
module_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "monai"))
output_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "apidocs"))
apidoc_command_path = "sphinx-apidoc"
if hasattr(sys, "real_prefix"): # called from a virtualenv
apidoc_command_path = os.path.join(sys.prefix, "bin", "sphinx-apidoc")
apidoc_command_path = os.path.abspath(apidoc_command_path)
print('output_path {}'.format(output_path))
print('module_path {}'.format(module_path))
print("output_path {}".format(output_path))
print("module_path {}".format(module_path))
subprocess.check_call(
[apidoc_command_path, '-f', '-e'] +
['-o', output_path] +
[module_path] +
[os.path.join(module_path, p) for p in exclude_patterns])
[apidoc_command_path, "-f", "-e"]
+ ["-o", output_path]
+ [module_path]
+ [os.path.join(module_path, p) for p in exclude_patterns]
)


def setup(app):
# Hook to allow for automatic generation of API docs
# before doc deployment begins.
app.connect('builder-inited', generate_apidocs)
app.connect("builder-inited", generate_apidocs)


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'restructuredtext',
'.md': 'markdown',
".rst": "restructuredtext",
".txt": "restructuredtext",
".md": "markdown",
}

extensions = [
'recommonmark',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel'
"recommonmark",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
]

autoclass_content = 'both'
autoclass_content = "both"
add_module_names = False
autosectionlabel_prefix_document = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
templates_path = ["_templates"]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme = "sphinx_rtd_theme"
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
'collapse_navigation': True,
'display_version': True,
'sticky_navigation': True, # Set to False to disable the sticky nav while scrolling.
'logo_only': True, # if we have a html_logo below, this shows /only/ the logo with no title text
'style_nav_header_background': '#FBFBFB',
"collapse_navigation": True,
"display_version": True,
"sticky_navigation": True, # Set to False to disable the sticky nav while scrolling.
"logo_only": True, # if we have a html_logo below, this shows /only/ the logo with no title text
"style_nav_header_background": "#FBFBFB",
}
html_context = {
'display_github': True,
'github_user': 'Project-MONAI',
'github_repo': 'MONAI',
'github_version': 'master',
'conf_py_path': '/docs/',
"display_github": True,
"github_user": "Project-MONAI",
"github_repo": "MONAI",
"github_version": "master",
"conf_py_path": "/docs/",
}
html_scaled_image_link = False
html_show_sourcelink = True
html_favicon = '../images/favicon.ico'
html_logo = '../images/MONAI-logo-color.png'
html_favicon = "../images/favicon.ico"
html_logo = "../images/MONAI-logo-color.png"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
html_css_files = ['custom.css']
html_static_path = ["../_static"]
html_css_files = ["custom.css"]
51 changes: 21 additions & 30 deletions examples/classification_3d/densenet_evaluation_array.py
@@ -19,55 +19,45 @@
from monai.data import NiftiDataset, CSVSaver
from monai.transforms import Compose, AddChannel, ScaleIntensity, Resize, ToTensor


def main():
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
images = [
'/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz'
"/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
]
# 2 binary labels for gender classification: man and woman
labels = np.array([
0, 0, 1, 0, 1, 0, 1, 0, 1, 0
])
labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])

# Define transforms for image
val_transforms = Compose([
ScaleIntensity(),
AddChannel(),
Resize((96, 96, 96)),
ToTensor()
])
val_transforms = Compose([ScaleIntensity(), AddChannel(), Resize((96, 96, 96)), ToTensor()])

# Define nifti dataset
val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
# create a validation data loader
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

# Create DenseNet121
device = torch.device('cuda:0')
model = monai.networks.nets.densenet.densenet121(
spatial_dims=3,
in_channels=1,
out_channels=2,
).to(device)
device = torch.device("cuda:0")
model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,).to(device)

model.load_state_dict(torch.load('best_metric_model.pth'))
model.load_state_dict(torch.load("best_metric_model.pth"))
model.eval()
with torch.no_grad():
num_correct = 0.
num_correct = 0.0
metric_count = 0
saver = CSVSaver(output_dir='./output')
saver = CSVSaver(output_dir="./output")
for val_data in val_loader:
val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
val_outputs = model(val_images).argmax(dim=1)
@@ -76,8 +66,9 @@ def main():
num_correct += value.sum().item()
saver.save_batch(val_outputs, val_data[2])
metric = num_correct / metric_count
print('evaluation metric:', metric)
print("evaluation metric:", metric)
saver.finalize()

if __name__ == '__main__':

if __name__ == "__main__":
main()
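
The metric reported by this script (and by the dictionary-based variant below) is plain classification accuracy: metric = num_correct / metric_count, where metric_count accumulates the number of validation samples. For example, if 8 of the 10 IXI volumes listed above were assigned the correct gender label, the script would print "evaluation metric: 0.8".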
66 changes: 32 additions & 34 deletions examples/classification_3d/densenet_evaluation_dict.py
@@ -19,66 +19,64 @@
from monai.transforms import Compose, LoadNiftid, AddChanneld, ScaleIntensityd, Resized, ToTensord
from monai.data import CSVSaver


def main():
monai.config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
images = [
'/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz',
'/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz'
"/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
"/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz",
]
# 2 binary labels for gender classification: man and woman
labels = np.array([
0, 0, 1, 0, 1, 0, 1, 0, 1, 0
])
val_files = [{'img': img, 'label': label} for img, label in zip(images, labels)]
labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
val_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

# Define transforms for image
val_transforms = Compose([
LoadNiftid(keys=['img']),
AddChanneld(keys=['img']),
ScaleIntensityd(keys=['img']),
Resized(keys=['img'], spatial_size=(96, 96, 96)),
ToTensord(keys=['img'])
])
val_transforms = Compose(
[
LoadNiftid(keys=["img"]),
AddChanneld(keys=["img"]),
ScaleIntensityd(keys=["img"]),
Resized(keys=["img"], spatial_size=(96, 96, 96)),
ToTensord(keys=["img"]),
]
)

# create a validation data loader
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

# Create DenseNet121
device = torch.device('cuda:0')
model = monai.networks.nets.densenet.densenet121(
spatial_dims=3,
in_channels=1,
out_channels=2,
).to(device)
device = torch.device("cuda:0")
model = monai.networks.nets.densenet.densenet121(spatial_dims=3, in_channels=1, out_channels=2,).to(device)

model.load_state_dict(torch.load('best_metric_model.pth'))
model.load_state_dict(torch.load("best_metric_model.pth"))
model.eval()
with torch.no_grad():
num_correct = 0.
num_correct = 0.0
metric_count = 0
saver = CSVSaver(output_dir='./output')
saver = CSVSaver(output_dir="./output")
for val_data in val_loader:
val_images, val_labels = val_data['img'].to(device), val_data['label'].to(device)
val_images, val_labels = val_data["img"].to(device), val_data["label"].to(device)
val_outputs = model(val_images).argmax(dim=1)
value = torch.eq(val_outputs, val_labels)
metric_count += len(value)
num_correct += value.sum().item()
saver.save_batch(val_outputs, {'filename_or_obj': val_data['img.filename_or_obj']})
saver.save_batch(val_outputs, {"filename_or_obj": val_data["img.filename_or_obj"]})
metric = num_correct / metric_count
print('evaluation metric:', metric)
print("evaluation metric:", metric)
saver.finalize()

if __name__ == '__main__':

if __name__ == "__main__":
main()
