conda install -c conda-forge zwatershed
pip install zwatershed
(segs, rand) = zwatershed_and_metrics(segTrue, aff_graph, eval_thresh_list, seg_save_thresh_list)
segs
: list of segmentations
len(segs) == len(seg_save_thresh_list)
rand
: dict
rand['V_Rand']
: V_Rand score (scalar)
rand['V_Rand_split']
: list of score values
len(rand['V_Rand_split']) == len(eval_thresh_list)
rand['V_Rand_merge']
: list of score values
len(rand['V_Rand_merge']) == len(eval_thresh_list)
segs = zwatershed(aff_graph, seg_save_thresh_list)
segs
: list of segmentations
len(segs) == len(seg_save_thresh_list)
rand = zwatershed_and_metrics_h5(segTrue, aff_graph, eval_thresh_list, seg_save_thresh_list, seg_save_path)
zwatershed_h5(aff_graph, eval_thresh_list, seg_save_path)
(segs, rand) = zwatershed_and_metrics_arb(segTrue, node1, node2, edgeWeight, eval_thresh_list, seg_save_thresh_list)
segs = zwatershed_arb(seg_shape, node1, node2, edgeWeight, seg_save_thresh_list)
rand = zwatershed_and_metrics_h5_arb(segTrue, node1, node2, edgeWeight, eval_thresh_list, seg_save_thresh_list, seg_save_path)
zwatershed_h5_arb(seg_shape, node1, node2, edgeWeight, eval_thresh_list, seg_save_path)
partition_data = partition_subvols(pred_file,out_folder,max_len)
eval_with_spark(partition_data[0])
eval_with_par_map(partition_data[0],NUM_WORKERS)
stitch_and_save(partition_data,outname)
seg_merged = merge_by_thresh(seg,seg_sizes,rg,thresh)