-
Notifications
You must be signed in to change notification settings - Fork 19
Open
Labels
Type: Bug — Something isn't working
Description
We need to remove some fields from the getJobs result.
Command:
{
"command":"jobs",
"node":"16Uiu2HAm94yL3Sjem2piKmGkiHCdJyTn3F3aWueZTXKT38ekjuzr"
}
Response:
[{"owner":"0xcF8A4B99640dEfaf99Acae9d770dEC9DfF37927d","did":null,"jobId":"2c2b2abeb565cfdf9a89c193cb1184494d2f9a246b9290ef1611010615549442","dateCreated":"1772632739.183","dateFinished":"1772633000.901","status":42,"statusText":"Error: disk quota exceeded","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xff1004b67de08fc505fbf0a2089010d0f23015338c7def8557697513c4a39935-0x539cf0b9b60f16b1411d3d1ff371860af4af435208a8fcfdcb0db5eca84aff54","clusterHash":"0xff1004b67de08fc505fbf0a2089010d0f23015338c7def8557697513c4a39935","configlogURL":null,"publishlogURL":null,"algologURL":null,"outputsURL":null,"stopRequested":false,"algorithm":{"meta":{"rawcode":"\"\"\"\n🧠 MNIST Digit Classifier - VERSION ROBUSTA\nH200 GPU · Data Augmentation · BatchNorm · Deep CNN · LR Scheduler\nTarget: >99.5% accuracy\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.utils.data import DataLoader\nimport shutil, os, time\n\n# ── Configuración ──────────────────────────────────────────────────────────────\nEPOCHS = 20\nBATCH_SIZE = 256\nLR = 0.001\nNUM_WORKERS = 4\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"\\n{'='*60}\")\nprint(f\" Dispositivo : {device}\")\nif device.type == \"cuda\":\n print(f\" GPU : {torch.cuda.get_device_name(0)}\")\n print(f\" VRAM : {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB\")\nprint(f\" Epochs : {EPOCHS}\")\nprint(f\" Batch size : {BATCH_SIZE}\")\nprint(f\"{'='*60}\\n\")\n\n# ── Data Augmentation ──────────────────────────────────────────────────────────\ntrain_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.RandomAffine(\n degrees=15,\n translate=(0.1, 0.1),\n scale=(0.85, 1.15),\n shear=10\n ),\n transforms.ElasticTransform(alpha=30.0, sigma=4.0),\n transforms.Normalize((0.1307,), (0.3081,))\n])\n\ntest_transform = transforms.Compose([\n 
transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n])\n\nprint(\"📦 Descargando MNIST...\")\ntrain_dataset = datasets.MNIST('./data', train=True, download=True, transform=train_transform)\ntest_dataset = datasets.MNIST('./data', train=False, download=True, transform=test_transform)\n\ntrain_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True,\n num_workers=NUM_WORKERS, pin_memory=True)\ntest_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False,\n num_workers=NUM_WORKERS, pin_memory=True)\n\nprint(f\"✅ Train: {len(train_dataset):,} imgs | Test: {len(test_dataset):,} imgs\")\nprint(f\" Augmentation: rotación±15°, translate, zoom, shear, distorsión elástica\\n\")\n\n# ── Modelo Deep CNN con BatchNorm ──────────────────────────────────────────────\nclass DeepCNN(nn.Module):\n def __init__(self):\n super().__init__()\n self.block1 = nn.Sequential(\n nn.Conv2d(1, 32, kernel_size=3, padding=1),\n nn.BatchNorm2d(32), nn.ReLU(inplace=True),\n nn.Conv2d(32, 32, kernel_size=3, padding=1),\n nn.BatchNorm2d(32), nn.ReLU(inplace=True),\n nn.MaxPool2d(2), nn.Dropout2d(0.1),\n )\n self.block2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64), nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=3, padding=1),\n nn.BatchNorm2d(64), nn.ReLU(inplace=True),\n nn.MaxPool2d(2), nn.Dropout2d(0.1),\n )\n self.block3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=3, padding=1),\n nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n nn.Dropout2d(0.1),\n )\n self.classifier = nn.Sequential(\n nn.Flatten(),\n nn.Linear(128 * 7 * 7, 512),\n nn.BatchNorm1d(512), nn.ReLU(inplace=True), nn.Dropout(0.4),\n nn.Linear(512, 256),\n nn.BatchNorm1d(256), nn.ReLU(inplace=True), nn.Dropout(0.3),\n nn.Linear(256, 10)\n )\n\n def forward(self, x):\n return self.classifier(self.block3(self.block2(self.block1(x))))\n\n\nmodel = 
DeepCNN().to(device)\noptimizer = optim.AdamW(model.parameters(), lr=LR, weight_decay=1e-4)\ncriterion = nn.CrossEntropyLoss(label_smoothing=0.1)\nscheduler = optim.lr_scheduler.OneCycleLR(\n optimizer, max_lr=LR, epochs=EPOCHS,\n steps_per_epoch=len(train_loader),\n pct_start=0.3, anneal_strategy='cos'\n)\n\ntotal_params = sum(p.numel() for p in model.parameters())\nprint(f\"🏗️ DeepCNN lista — {total_params:,} parámetros\")\nprint(f\" Arquitectura: 3x[Conv→BN→ReLU→Conv→BN→ReLU→Pool] + 3x Linear\\n\")\n\nscaler = torch.cuda.amp.GradScaler(enabled=(device.type == \"cuda\"))\n\n# ── Entrenamiento ──────────────────────────────────────────────────────────────\ndef train_epoch(epoch):\n model.train()\n total_loss, correct, total = 0, 0, 0\n start = time.time()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device, non_blocking=True), target.to(device, non_blocking=True)\n optimizer.zero_grad()\n with torch.cuda.amp.autocast(enabled=(device.type == \"cuda\")):\n output = model(data)\n loss = criterion(output, target)\n scaler.scale(loss).backward()\n scaler.unscale_(optimizer)\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n scaler.step(optimizer)\n scaler.update()\n scheduler.step()\n total_loss += loss.item()\n pred = output.argmax(dim=1)\n correct += pred.eq(target).sum().item()\n total += len(target)\n if (batch_idx + 1) % 50 == 0:\n print(f\" Epoch {epoch:02d} [{(batch_idx+1)*BATCH_SIZE:>6}/{len(train_dataset)}] \"\n f\"Loss: {total_loss/(batch_idx+1):.4f} | Acc: {100.*correct/total:.2f}% \"\n f\"| LR: {scheduler.get_last_lr()[0]:.6f}\")\n return total_loss / len(train_loader), 100. 
* correct / total, time.time() - start\n\n\ndef evaluate():\n model.eval()\n correct, total = 0, 0\n per_correct = [0] * 10\n per_total = [0] * 10\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device, non_blocking=True), target.to(device, non_blocking=True)\n with torch.cuda.amp.autocast(enabled=(device.type == \"cuda\")):\n output = model(data)\n pred = output.argmax(dim=1)\n correct += pred.eq(target).sum().item()\n total += len(target)\n for t, p in zip(target, pred):\n per_total[t.item()] += 1\n per_correct[t.item()] += int(t == p)\n return 100. * correct / total, per_correct, per_total\n\n\nprint(\"🚀 Iniciando entrenamiento...\\n\")\nbest_acc = 0\n\nfor epoch in range(1, EPOCHS + 1):\n print(f\"─── Epoch {epoch:02d}/{EPOCHS} {'─'*40}\")\n loss, train_acc, elapsed = train_epoch(epoch)\n test_acc, per_correct, per_total = evaluate()\n print(f\" ✔ Loss: {loss:.4f} | Train: {train_acc:.2f}% | Test: {test_acc:.2f}% | {elapsed:.1f}s\")\n if test_acc > best_acc:\n best_acc = test_acc\n torch.save(model.state_dict(), \"best_model.pt\")\n print(f\" 💾 Nuevo mejor modelo! ({best_acc:.4f}%)\")\n print()\n\n# ── Reporte final por dígito ───────────────────────────────────────────────────\nprint(f\"\\n{'='*60}\")\nprint(f\" 🏁 Entrenamiento finalizado\")\nprint(f\" 🎯 Mejor accuracy: {best_acc:.4f}%\")\nprint(f\"{'='*60}\")\nprint(f\"\\n📊 Accuracy por dígito:\")\nmodel.load_state_dict(torch.load(\"best_model.pt\", map_location=device))\n_, per_correct, per_total = evaluate()\nfor i in range(10):\n acc_i = 100. 
* per_correct[i] / per_total[i]\n bar = '█' * int(acc_i / 2) + '░' * (50 - int(acc_i / 2))\n print(f\" {i}: {bar} {acc_i:.2f}%\")\n\nos.makedirs(\"/data/outputs\", exist_ok=True)\nshutil.copy(\"best_model.pt\", \"/data/outputs/best_model.pt\")\nprint(f\"\\n📤 Modelo guardado en /data/outputs/best_model.pt\")\n","container":{"image":"oceanprotocol/c2d_examples","tag":"py-general","entrypoint":"python $ALGO","additionalDockerFiles":null,"checksum":""}},"envs":{}},"assets":[],"isRunning":false,"isStarted":false,"containerImage":"oceanprotocol/c2d_examples:py-general","resources":[{"id":"gpu0","amount":1},{"id":"gpu1","amount":0},{"id":"disk","amount":3},{"id":"cpu","amount":4},{"id":"ram","amount":6}],"isFree":false,"algoStartTimestamp":"1772632745.449","algoStopTimestamp":"1772633000.901","terminationDetails":{"exitCode":null,"OOMKilled":null},"payment":{"chainId":8453,"token":"0x298f163244e0c8cc9316D6E97162e5792ac5d410","lockTx":"0xfd2345ebb2de9e71e1845359abe213f240a0a482bbb64327a3adf48ed95e3512","claimTx":null,"cost":0},"algoDuration":0,"queueMaxWaitTime":0,"maxJobDuration":3600},{"owner":"0xC5ea7916f95D5a087A644f1Dc0f7d19955eC446F","did":null,"jobId":"c3a5910bf9e99d6865b402c5fa8cc88bf916dcf04abad57d9a20c22a5c3e44f9","dateCreated":"1772620114.192","dateFinished":"1772620172.234","status":70,"statusText":"Job 
finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xcfd75661169c9fe1613ff5ef2a376355ed40953aa41506c9075758753e633fb1-0x6a0d3ec45a955bbeacb4f0b6a4529ec3778f0e2a355483ffe5c685e2fcb0fb1b","stopRequested":false,"resources":[{"id":"gpu6","amount":0},{"id":"gpu7","amount":0},{"id":"disk","amount":1},{"id":"cpu","amount":1},{"id":"ram","amount":6}],"isFree":false,"algoStartTimestamp":"1772620118.807","algoStopTimestamp":"1772620170.201","terminationDetails":{"exitCode":0,"OOMKilled":false},"payment":{"chainId":11155111,"token":"0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238","lockTx":"0x8849df2bfb0b93dee0b3c96b86c74a75912be10a2d0a7a8dc416498f8435d7f6","claimTx":"0xea33354035b4e31729452b6a2fc8b86b9ec9b189ed35abe0f2ef58e4e4fa77f9","cost":0.000702},"algoDuration":51.39400005340576,"queueMaxWaitTime":0,"maxJobDuration":180},{"owner":"0xC5ea7916f95D5a087A644f1Dc0f7d19955eC446F","did":null,"jobId":"3fe41f10b69157340cc3bab4fbbc4c357d5bc4903eb8f4ce543d533a80fa5ac5","dateCreated":"1772619364.161","dateFinished":"1772619421.729","status":70,"statusText":"Job 
finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xcfd75661169c9fe1613ff5ef2a376355ed40953aa41506c9075758753e633fb1-0x6a0d3ec45a955bbeacb4f0b6a4529ec3778f0e2a355483ffe5c685e2fcb0fb1b","stopRequested":false,"resources":[{"id":"gpu6","amount":0},{"id":"gpu7","amount":0},{"id":"disk","amount":1},{"id":"cpu","amount":1},{"id":"ram","amount":6}],"isFree":false,"algoStartTimestamp":"1772619368.978","algoStopTimestamp":"1772619419.698","terminationDetails":{"exitCode":0,"OOMKilled":false},"payment":{"chainId":11155111,"token":"0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238","lockTx":"0x2cd89ff8ef5f751392703830592eadb05e96597eb11cfddf75782e8921be0d6c","claimTx":"0xea33354035b4e31729452b6a2fc8b86b9ec9b189ed35abe0f2ef58e4e4fa77f9","cost":0.000702},"algoDuration":50.72000002861023,"queueMaxWaitTime":0,"maxJobDuration":180},{"owner":"0xC5ea7916f95D5a087A644f1Dc0f7d19955eC446F","did":null,"jobId":"37037720de8b753398d1401dcae6fea4f9efdbe2dbd60745e37e0cd1279c00e7","dateCreated":"1772619304.417","dateFinished":"1772619362.421","status":70,"statusText":"Job 
finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xcfd75661169c9fe1613ff5ef2a376355ed40953aa41506c9075758753e633fb1-0x6a0d3ec45a955bbeacb4f0b6a4529ec3778f0e2a355483ffe5c685e2fcb0fb1b","stopRequested":false,"resources":[{"id":"gpu6","amount":0},{"id":"gpu7","amount":0},{"id":"disk","amount":1},{"id":"cpu","amount":1},{"id":"ram","amount":6}],"isFree":false,"algoStartTimestamp":"1772619308.808","algoStopTimestamp":"1772619360.376","terminationDetails":{"exitCode":0,"OOMKilled":false},"payment":{"chainId":11155111,"token":"0x1c7D4B196Cb0C7B01d743Fbc6116a902379C7238","lockTx":"0x4770e5f4351926cd2f0a3054948e09b31f31aad5b2b0e3d8315e2f2318a1032e","claimTx":"0xea33354035b4e31729452b6a2fc8b86b9ec9b189ed35abe0f2ef58e4e4fa77f9","cost":0.000702},"algoDuration":51.567999839782715,"queueMaxWaitTime":0,"maxJobDuration":180},{"owner":"0xf9B58b80Ece90bD16F394aDD8F3c32b19D13a269","did":null,"jobId":"9519fe3caab2d2caadb5bb798f8caaf88b9e59eac146ea19c9819128fd51eb6f","dateCreated":"1772586907.44","dateFinished":"1772588230.056","status":70,"statusText":"Job 
finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xff1004b67de08fc505fbf0a2089010d0f23015338c7def8557697513c4a39935-0x539cf0b9b60f16b1411d3d1ff371860af4af435208a8fcfdcb0db5eca84aff54","stopRequested":false,"resources":[{"id":"gpu0","amount":1},{"id":"gpu1","amount":1},{"id":"disk","amount":200},{"id":"cpu","amount":40},{"id":"ram","amount":122}],"isFree":false,"algoStartTimestamp":"1772587003.616","algoStopTimestamp":"1772588227.765","terminationDetails":{"exitCode":0,"OOMKilled":false},"payment":{"chainId":8453,"token":"0x298f163244e0c8cc9316D6E97162e5792ac5d410","lockTx":"0x6340ee7a3ca6863cd3af80955fd2fc78364d27b509ff0cb44ec31b1e82939438","claimTx":"0x3eb7e1d054c3efeca53e7d1fe0494cdcdd75a7c7f3f3c04793e93a1057e6277c","cost":1.8605999999999998},"algoDuration":1224.1490001678467,"queueMaxWaitTime":0,"maxJobDuration":10800},{"owner":"0xf9B58b80Ece90bD16F394aDD8F3c32b19D13a269","did":null,"jobId":"b8a0f405d607b14571cbc56a64c5b3a36320d05188f99a63a269bec8bff008ee","dateCreated":"1772585962.31","dateFinished":"1772586033.492","status":70,"statusText":"Job 
finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xff1004b67de08fc505fbf0a2089010d0f23015338c7def8557697513c4a39935-0x539cf0b9b60f16b1411d3d1ff371860af4af435208a8fcfdcb0db5eca84aff54","stopRequested":false,"resources":[{"id":"gpu0","amount":1},{"id":"gpu1","amount":1},{"id":"disk","amount":200},{"id":"cpu","amount":40},{"id":"ram","amount":122}],"isFree":false,"algoStartTimestamp":"1772585987.77","algoStopTimestamp":"1772586031.246","terminationDetails":{"exitCode":0,"OOMKilled":false},"payment":{"chainId":8453,"token":"0x298f163244e0c8cc9316D6E97162e5792ac5d410","lockTx":"0xd1f317e418caaa7a2b8dac8f660ef96fb46e06c4c0d770c0eb5a1c532746ba47","claimTx":"0x6a0de43b8d7858f8659259cccab7e7fad60079b805b8055af8e71b8ad502a237","cost":0.0886},"algoDuration":43.4760000705719,"queueMaxWaitTime":0,"maxJobDuration":10800},{"owner":"0xf9B58b80Ece90bD16F394aDD8F3c32b19D13a269","did":null,"jobId":"4caf1bb03710952f1bc5ef75f5f42ef60dc1650b3487d41069231be93c6eda37","dateCreated":"1772585759.35","dateFinished":"1772585769.719","status":70,"statusText":"Job 
finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xff1004b67de08fc505fbf0a2089010d0f23015338c7def8557697513c4a39935-0x539cf0b9b60f16b1411d3d1ff371860af4af435208a8fcfdcb0db5eca84aff54","stopRequested":false,"resources":[{"id":"gpu0","amount":1},{"id":"gpu1","amount":1},{"id":"disk","amount":1000},{"id":"cpu","amount":40},{"id":"ram","amount":80}],"isFree":false,"algoStartTimestamp":"1772585762.156","algoStopTimestamp":"1772585767.684","terminationDetails":{"exitCode":1,"OOMKilled":false},"payment":{"chainId":8453,"token":"0x298f163244e0c8cc9316D6E97162e5792ac5d410","lockTx":"0xf8f7cf11dbe3c83fe967df33bf5d1cc4bebd2d66064284694261837358a98768","claimTx":"0x6a0de43b8d7858f8659259cccab7e7fad60079b805b8055af8e71b8ad502a237","cost":0.086},"algoDuration":5.528000116348267,"queueMaxWaitTime":0,"maxJobDuration":3600},{"owner":"0xf9B58b80Ece90bD16F394aDD8F3c32b19D13a269","did":null,"jobId":"8751099d20cd4bf3b86bf0ea1d55a85ead03d8eee491bb61dfabde8ef7f05a7d","dateCreated":"1772582643.388","dateFinished":"1772583187.461","status":70,"statusText":"Job finished","results":null,"inputDID":null,"algoDID":null,"agreementId":null,"environment":"0xff1004b67de08fc505fbf0a2089010d0f23015338c7def8557697513c4a39935-0x539cf0b9b60f16b1411d3d1ff371860af4af435208a8fcfdcb0db5eca84aff54","stopRequested":false,"resources":[{"id":"gpu0","amount":0},{"id":"gpu1","amount":0},{"id":"disk","amount":0},{"id":"cpu","amount":1},{"id":"ram","amount":1}],"isFree":false,"algoStartTimestamp":"1772583153.963","algoStopTimestamp":"1772583185.427","terminationDetails":{"exitCode":137,"OOMKilled":true},"payment":{"chainId":8453,"token":"0x298f163244e0c8cc9316D6E97162e5792ac5d410","lockTx":"0xae9e6ff79b649719c5f8e556717f1ac25e923a9b873d148b69f7695538c44c08","claimTx":"0x6a0de43b8d7858f8659259cccab7e7fad60079b805b8055af8e71b8ad502a237","cost":0.0002},"algoDuration":31.46399998664856,"queueMaxWaitTime":0,"maxJobDuration":3600}]
Reactions are currently unavailable
Metadata
Metadata
Assignees
Labels
Type: Bug — Something isn't working