torch_em.metric.instance_segmentation_metric
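Validation metrics for instance segmentation. Each metric combines a segmenter, which turns network predictions (affinities, boundaries, or embeddings) into an instance segmentation, with an error score computed against the target labels; all scores are errors, so lower is better.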
from functools import partial

import numpy as np
import elf.evaluation as elfval
import elf.segmentation as elfseg
import elf.segmentation.embeddings as elfemb
import torch
import torch.nn as nn
import vigra
from elf.segmentation.watershed import apply_size_filter


class BaseInstanceSegmentationMetric(nn.Module):
    def __init__(self, segmenter, metric, to_numpy=True):
        super().__init__()
        self.segmenter = segmenter
        self.metric = metric
        self.to_numpy = to_numpy

    def forward(self, input_, target):
        if self.to_numpy:
            input_ = input_.detach().cpu().numpy().astype("float32")
            target = target.detach().cpu().numpy()
        assert input_.ndim == target.ndim
        assert len(input_) == len(target)
        scores = []
        # compute the metric per sample in the batch
        for pred, trgt in zip(input_, target):
            seg = self.segmenter(pred)
            # by convention, the instance segmentation is in the last channel of trgt
            scores.append(self.metric(seg, trgt[-1].astype("uint32")))
        return torch.tensor(scores).mean()


#
# Segmenters
#

def filter_sizes(seg, min_seg_size, hmap=None):
    seg_ids, counts = np.unique(seg, return_counts=True)
    if hmap is None:
        # without a height map: map undersized segments to background
        bg_ids = seg_ids[counts < min_seg_size]
        seg[np.isin(seg, bg_ids)] = 0
    else:
        # with a height map: merge undersized segments into their neighbors
        ndim = seg.ndim
        hmap_ = hmap if hmap.ndim == ndim else np.max(hmap, axis=0)
        seg, _ = apply_size_filter(seg, hmap_, min_seg_size)
    return seg


class MWS:
    def __init__(self, offsets, with_background, min_seg_size, strides=None):
        self.offsets = offsets
        self.with_background = with_background
        self.min_seg_size = min_seg_size
        if strides is None:
            strides = [4] * len(offsets[0])
        assert len(strides) == len(offsets[0])
        self.strides = strides

    def __call__(self, affinities):
        if self.with_background:
            # the first channel is the foreground mask, the rest are affinities
            assert len(affinities) == len(self.offsets) + 1
            mask, affinities = affinities[0], affinities[1:]
        else:
            assert len(affinities) == len(self.offsets)
            mask = None
        seg = elfseg.mutex_watershed.mutex_watershed(affinities, self.offsets, self.strides,
                                                     randomize_strides=True, mask=mask).astype("uint32")
        if self.min_seg_size > 0:
            seg = filter_sizes(seg, self.min_seg_size,
                               hmap=None if self.with_background else affinities)
        return seg


class EmbeddingMWS:
    def __init__(self, delta, offsets, with_background, min_seg_size, strides=None):
        self.delta = delta
        self.offsets = offsets
        self.with_background = with_background
        self.min_seg_size = min_seg_size
        if strides is None:
            strides = [4] * len(offsets[0])
        assert len(strides) == len(offsets[0])
        self.strides = strides

    def merge_background(self, seg, embeddings):
        seg += 1
        seg_ids, counts = np.unique(seg, return_counts=True)
        # the largest segment is assumed to be the background
        bg_seg = seg_ids[np.argmax(counts)]
        mean_embeddings = []
        for emb in embeddings:
            mean_embeddings.append(vigra.analysis.extractRegionFeatures(emb, seg, features=["mean"])["mean"][None])
        mean_embeddings = np.concatenate(mean_embeddings, axis=0)
        bg_embed = mean_embeddings[:, bg_seg][:, None]
        # merge all segments whose mean embedding is close to the background embedding
        bg_probs = elfemb._embeddings_to_probabilities(mean_embeddings, bg_embed, self.delta, 0)
        bg_ids = np.where(bg_probs > 0.5)
        seg[np.isin(seg, bg_ids)] = 0
        vigra.analysis.relabelConsecutive(seg, out=seg)
        return seg

    def __call__(self, embeddings):
        weight = partial(elfemb.discriminative_loss_weight, delta=self.delta)
        seg = elfemb.segment_embeddings_mws(
            embeddings, "l2", self.offsets, strides=self.strides, weight_function=weight
        ).astype("uint32")
        if self.with_background:
            seg = self.merge_background(seg, embeddings)
        if self.min_seg_size > 0:
            seg = filter_sizes(seg, self.min_seg_size)
        return seg


class Multicut:
    def __init__(self, min_seg_size, anisotropic=False, dt_threshold=0.25, sigma_seeds=2.0, solver="decomposition"):
        self.min_seg_size = min_seg_size
        self.anisotropic = anisotropic
        self.dt_threshold = dt_threshold
        self.sigma_seeds = sigma_seeds
        self.solver = solver

    def __call__(self, boundaries):
        if boundaries.shape[0] == 1:
            boundaries = boundaries[0]
        assert boundaries.ndim in (2, 3), f"{boundaries.ndim}"
        if self.anisotropic and boundaries.ndim == 3:
            ws, max_id = elfseg.stacked_watershed(boundaries, threshold=self.dt_threshold,
                                                  sigma_seeds=self.sigma_seeds,
                                                  sigma_weights=self.sigma_seeds,
                                                  n_threads=1)
        else:
            ws, max_id = elfseg.distance_transform_watershed(boundaries, threshold=self.dt_threshold,
                                                             sigma_seeds=self.sigma_seeds,
                                                             sigma_weights=self.sigma_seeds)
        rag = elfseg.compute_rag(ws, max_id + 1, n_threads=1)
        feats = elfseg.compute_boundary_mean_and_length(rag, boundaries, n_threads=1)[:, 0]
        costs = elfseg.compute_edge_costs(feats)
        solver = elfseg.get_multicut_solver(self.solver)
        node_labels = solver(rag, costs, n_threads=1)
        seg = elfseg.project_node_labels_to_pixels(rag, node_labels, n_threads=1).astype("uint32")
        if self.min_seg_size > 0:
            seg = filter_sizes(seg, self.min_seg_size, hmap=boundaries)
        return seg


class HDBScan:
    def __init__(self, min_size, eps, remove_largest):
        self.min_size = min_size
        self.eps = eps
        self.remove_largest = remove_largest

    def __call__(self, embeddings):
        return elfemb.segment_hdbscan(embeddings, self.min_size, self.eps, self.remove_largest)


#
# Metrics
#
# All metrics below are error scores: lower values mean better agreement
# between segmentation and target.

class IOUError:
    def __init__(self, threshold=0.5, metric="precision"):
        self.threshold = threshold
        self.metric = metric

    def __call__(self, seg, target):
        score = 1.0 - elfval.matching(seg, target, threshold=self.threshold)[self.metric]
        return score


class VariationOfInformation:
    def __call__(self, seg, target):
        vis, vim = elfval.variation_of_information(seg, target)
        return vis + vim


class AdaptedRandError:
    def __call__(self, seg, target):
        are, _ = elfval.rand_index(seg, target)
        return are


class SymmetricBestDice:
    def __call__(self, seg, target):
        score = 1.0 - elfval.symmetric_best_dice_score(seg, target)
        return score


#
# Prefab Full Metrics
#


class EmbeddingMWSIOUMetric(BaseInstanceSegmentationMetric):
    def __init__(self, delta, offsets, min_seg_size, iou_threshold=0.5, strides=None):
        segmenter = EmbeddingMWS(delta, offsets, with_background=True, min_seg_size=min_seg_size, strides=strides)
        metric = IOUError(iou_threshold)
        super().__init__(segmenter, metric)
        self.init_kwargs = {"delta": delta, "offsets": offsets, "min_seg_size": min_seg_size,
                            "iou_threshold": iou_threshold, "strides": strides}


class EmbeddingMWSSBDMetric(BaseInstanceSegmentationMetric):
    def __init__(self, delta, offsets, min_seg_size, strides=None):
        segmenter = EmbeddingMWS(delta, offsets, with_background=True, min_seg_size=min_seg_size, strides=strides)
        metric = SymmetricBestDice()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"delta": delta, "offsets": offsets, "min_seg_size": min_seg_size, "strides": strides}


class EmbeddingMWSVOIMetric(BaseInstanceSegmentationMetric):
    def __init__(self, delta, offsets, min_seg_size, strides=None):
        segmenter = EmbeddingMWS(delta, offsets, with_background=False, min_seg_size=min_seg_size, strides=strides)
        metric = VariationOfInformation()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"delta": delta, "offsets": offsets, "min_seg_size": min_seg_size, "strides": strides}


class EmbeddingMWSRandMetric(BaseInstanceSegmentationMetric):
    def __init__(self, delta, offsets, min_seg_size, strides=None):
        segmenter = EmbeddingMWS(delta, offsets, with_background=False, min_seg_size=min_seg_size, strides=strides)
        metric = AdaptedRandError()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"delta": delta, "offsets": offsets, "min_seg_size": min_seg_size, "strides": strides}


class HDBScanIOUMetric(BaseInstanceSegmentationMetric):
    def __init__(self, min_size, eps, iou_threshold=0.5):
        segmenter = HDBScan(min_size=min_size, eps=eps, remove_largest=True)
        metric = IOUError(iou_threshold)
        super().__init__(segmenter, metric)
        self.init_kwargs = {"min_size": min_size, "eps": eps, "iou_threshold": iou_threshold}


class HDBScanSBDMetric(BaseInstanceSegmentationMetric):
    def __init__(self, min_size, eps):
        segmenter = HDBScan(min_size=min_size, eps=eps, remove_largest=True)
        metric = SymmetricBestDice()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"min_size": min_size, "eps": eps}


class HDBScanRandMetric(BaseInstanceSegmentationMetric):
    def __init__(self, min_size, eps):
        segmenter = HDBScan(min_size=min_size, eps=eps, remove_largest=True)
        metric = AdaptedRandError()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"min_size": min_size, "eps": eps}


class HDBScanVOIMetric(BaseInstanceSegmentationMetric):
    def __init__(self, min_size, eps):
        segmenter = HDBScan(min_size=min_size, eps=eps, remove_largest=True)
        metric = VariationOfInformation()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"min_size": min_size, "eps": eps}


class MulticutVOIMetric(BaseInstanceSegmentationMetric):
    def __init__(self, min_seg_size, anisotropic=False, dt_threshold=0.25, sigma_seeds=2.0):
        segmenter = Multicut(min_seg_size, anisotropic, dt_threshold, sigma_seeds)
        metric = VariationOfInformation()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"anisotropic": anisotropic, "min_seg_size": min_seg_size,
                            "dt_threshold": dt_threshold, "sigma_seeds": sigma_seeds}


class MulticutRandMetric(BaseInstanceSegmentationMetric):
    def __init__(self, min_seg_size, anisotropic=False, dt_threshold=0.25, sigma_seeds=2.0):
        segmenter = Multicut(min_seg_size, anisotropic, dt_threshold, sigma_seeds)
        metric = AdaptedRandError()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"anisotropic": anisotropic, "min_seg_size": min_seg_size,
                            "dt_threshold": dt_threshold, "sigma_seeds": sigma_seeds}


class MWSIOUMetric(BaseInstanceSegmentationMetric):
    def __init__(self, offsets, min_seg_size, iou_threshold=0.5, strides=None):
        segmenter = MWS(offsets, with_background=True, min_seg_size=min_seg_size, strides=strides)
        metric = IOUError(iou_threshold)
        super().__init__(segmenter, metric)
        self.init_kwargs = {"offsets": offsets, "min_seg_size": min_seg_size,
                            "iou_threshold": iou_threshold, "strides": strides}


class MWSSBDMetric(BaseInstanceSegmentationMetric):
    def __init__(self, offsets, min_seg_size, strides=None):
        segmenter = MWS(offsets, with_background=True, min_seg_size=min_seg_size, strides=strides)
        metric = SymmetricBestDice()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"offsets": offsets, "min_seg_size": min_seg_size, "strides": strides}


class MWSVOIMetric(BaseInstanceSegmentationMetric):
    def __init__(self, offsets, min_seg_size, strides=None):
        segmenter = MWS(offsets, with_background=False, min_seg_size=min_seg_size, strides=strides)
        metric = VariationOfInformation()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"offsets": offsets, "min_seg_size": min_seg_size, "strides": strides}


class MWSRandMetric(BaseInstanceSegmentationMetric):
    def __init__(self, offsets, min_seg_size, strides=None):
        segmenter = MWS(offsets, with_background=False, min_seg_size=min_seg_size, strides=strides)
        metric = AdaptedRandError()
        super().__init__(segmenter, metric)
        self.init_kwargs = {"offsets": offsets, "min_seg_size": min_seg_size, "strides": strides}
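A minimal usage sketch of the prefab metrics, using the classes defined above (the offsets, tensor shapes, and parameter values here are illustrative assumptions, not requirements of the module):

import torch

# hypothetical 2D affinity offsets; any offset pattern matching the prediction channels works
offsets = [[-1, 0], [0, -1], [-3, 0], [0, -3]]
metric = MWSIOUMetric(offsets, min_seg_size=50)

# MWSIOUMetric segments with a foreground mask (with_background=True), so the
# prediction carries 1 + len(offsets) channels: (batch, 1 + len(offsets), height, width)
pred = torch.rand(2, 1 + len(offsets), 64, 64)
# forward() reads the instance labels from the last target channel;
# random labels here, just to illustrate the expected shape
target = torch.randint(0, 10, (2, 1, 64, 64))

score = metric(pred, target)  # 1 - precision at IoU threshold 0.5, averaged over the batch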
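To see what filter_sizes does without a height map, a toy example (values assumed): segments smaller than min_seg_size are mapped to background, whereas with a height map apply_size_filter merges them into neighboring segments instead.

import numpy as np

seg = np.zeros((8, 8), dtype="uint32")
seg[:4, :4] = 1   # 16 pixels, survives the filter
seg[6, 6] = 2     # 1 pixel, below min_seg_size
filtered = filter_sizes(seg.copy(), min_seg_size=4)
assert 2 not in filtered  # the undersized segment was set to background (0)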
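The segmenters are plain callables and can also be run on their own. A sketch with the Multicut segmenter on a synthetic boundary map (the input values and the solver choice are assumptions; any name accepted by elf.segmentation.get_multicut_solver works):

import numpy as np

# synthetic single-channel boundary map in [0, 1]; the leading singleton channel is squeezed
boundaries = np.random.rand(1, 128, 128).astype("float32")
segmenter = Multicut(min_seg_size=25, solver="kernighan-lin")
seg = segmenter(boundaries)  # uint32 label image of shape (128, 128)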
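The embedding-based metrics work the same way. A sketch with HDBScanSBDMetric on random embeddings (the embedding dimension and eps value are assumptions; elf's segment_hdbscan additionally requires the hdbscan package):

import torch

metric = HDBScanSBDMetric(min_size=50, eps=1e-4)
# (batch, embedding_dim, height, width) embeddings; instance labels again in the last target channel
embeddings = torch.rand(1, 8, 64, 64)
target = torch.randint(0, 5, (1, 1, 64, 64))
score = metric(embeddings, target)  # 1 - symmetric best dice, lower is better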
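Finally, the metric objects themselves are plain callables on label arrays, which is convenient for sanity checks (toy arrays, assumed values):

import numpy as np

iou_error = IOUError(threshold=0.5)
labels = np.array([[1, 1], [2, 2]], dtype="uint32")
print(iou_error(labels, labels))  # 0.0: identical segmentations give precision 1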