| |
| from __future__ import annotations |
| import os |
| from typing import Optional, Dict |
| from huggingface_hub import hf_hub_download |
|
|
| from subiso_dataset import ( |
| SubgraphIsomorphismDataset, |
| |
| TRAIN_MODE, VAL_MODE, TEST_MODE |
| ) |
|
|
| |
| |
| |
|
|
| def _pairs_for_size(dataset_size: str) -> str: |
| return "80k" if dataset_size == "small" else "240k" |
|
|
| def _folder_for_size(dataset_size: str) -> str: |
| return "small_dataset" if dataset_size == "small" else "large_dataset" |
|
|
| def _normalize_name(base_name: str, dataset_size: str) -> str: |
| """ |
| Accepts 'aids' or 'aids240k' (and similarly for other sets). |
| If bare name -> append pairs; if already has 80k/240k -> keep as-is. |
| """ |
| pairs = _pairs_for_size(dataset_size) |
| if base_name.endswith(("80k", "240k")): |
| return base_name |
| return f"{base_name}{pairs}" |
|
|
| def _mode_prefix_and_dir(mode: str) -> tuple[str, str]: |
| """ |
| File prefix uses 'test' when mode contains 'test' (repo convention). |
| Directory has train/val/test. Map Extra_test_300 => 'test'. |
| """ |
| prefix = "test" if "test" in mode.lower() else mode |
| mode_dir = "test" if "test" in mode.lower() else mode |
| return prefix, mode_dir |
|
|
| |
| |
| |
|
|
def _ensure_paths(
    repo_id: str,
    mode: str,
    dataset_name: str,
    dataset_size: str,
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """
    Download the three files needed into cache (or local_root if set):
      - <folder>/splits/<mode_dir>/<prefix>_<base>_query_subgraphs.pkl
      - <folder>/splits/<mode_dir>/<prefix>_<base>_rel_nx_is_subgraph_iso.pkl
      - <folder>/corpus/<base>_corpus_subgraphs.pkl
    where <base> is normalized (contains 80k/240k exactly once) and
    <prefix>/<mode_dir> collapse any test-variant mode to 'test'.

    Args:
        repo_id: HuggingFace dataset repo id.
        mode: split mode ('train', 'val', 'test', or a test variant).
        dataset_name: bare or already-suffixed dataset name (e.g. 'aids').
        dataset_size: 'small' or 'large'; selects repo folder and suffix.
        local_root: optional directory to materialize files into instead
            of the default HF cache.

    Returns:
        Dict with local paths under keys 'query', 'rel', 'corpus'.
    """
    folder = _folder_for_size(dataset_size)
    base = _normalize_name(dataset_name, dataset_size)
    # BUGFIX: the raw mode was previously used for both the filename
    # prefix and the split directory, so test variants such as
    # 'Extra_test_300' produced paths that don't exist in the repo.
    # Route through _mode_prefix_and_dir as the docstring documents.
    prefix, mode_dir = _mode_prefix_and_dir(mode)

    query_fname = f"{prefix}_{base}_query_subgraphs.pkl"
    rel_fname = f"{prefix}_{base}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{base}_corpus_subgraphs.pkl"

    repo_query_path = f"{folder}/splits/{mode_dir}/{query_fname}"
    repo_rel_path = f"{folder}/splits/{mode_dir}/{rel_fname}"
    repo_corpus_path = f"{folder}/corpus/{corpus_fname}"

    # NOTE(review): local_dir_use_symlinks is deprecated/ignored in
    # recent huggingface_hub releases — confirm the installed version
    # still accepts it before removing.
    kwargs = dict(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=local_root,
        local_dir_use_symlinks=False,
    )

    query_path = hf_hub_download(filename=repo_query_path, **kwargs)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs)

    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
|
|
| |
| |
| |
|
|
def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: str = "train",
    dataset_name: str = "aids",
    dataset_size: str = "large",
    batch_size: int = 128,
    data_type: str = "gmn",
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    """
    Fetch an ISONET++ benchmark split from the HuggingFace Hub and wrap
    it in a SubgraphIsomorphismDataset.

    Args:
        repo_id: HuggingFace dataset repo id.
        mode: 'train'/'val'/'test' (mapped to the dataset's mode
            constants) or any mode string accepted downstream.
        dataset_name: dataset name, bare or suffixed (e.g. 'aids').
        dataset_size: 'small' or 'large'.
        batch_size: batch size passed through to the dataset.
        data_type: data representation passed through (e.g. 'gmn').
        device: optional device string passed through.
        download_root: optional directory to download files into
            instead of the default HF cache.

    Returns:
        A SubgraphIsomorphismDataset over the downloaded files.
    """
    canonical_modes = {"train": TRAIN_MODE, "val": VAL_MODE, "test": TEST_MODE}
    resolved_mode = canonical_modes.get(mode, mode)

    files = _ensure_paths(
        repo_id=repo_id,
        mode=resolved_mode,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root,
    )

    # Recover the dataset base path by walking four levels up from the
    # query file: .../<base>/<folder>/splits/<mode_dir>/<file>.
    base_path = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.dirname(files["query"])))
    )

    return SubgraphIsomorphismDataset(
        mode=resolved_mode,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        batch_size=batch_size,
        data_type=data_type,
        dataset_base_path=base_path,
        dataset_path_override=None,
        experiment=None,
        device=device,
    )
|
|