# coding: utf-8 

# Copyright (c) Pymatgen Development Team. 

# Distributed under the terms of the MIT License. 

""" 

A Flow is a container for Works, and works consist of tasks. 

Flows are the final objects that can be dumped directly to a pickle file on disk.

Flows are executed using abirun (abipy). 

""" 

from __future__ import unicode_literals, division, print_function 

 

import os 

import sys 

import time 

import collections 

import warnings 

import shutil 

import copy 

import numpy as np 

 

from pprint import pprint 

from six.moves import map, StringIO 

from atomicfile import AtomicFile 

from tabulate import tabulate 

from pydispatch import dispatcher 

from collections import OrderedDict 

from monty.collections import as_set, dict2namedtuple 

from monty.string import list_strings, is_string 

from monty.operator import operator_from_str 

from monty.io import FileLock 

from monty.pprint import draw_tree 

from monty.termcolor import cprint, colored, cprint_map 

from monty.inspect import find_top_pyfile 

from pymatgen.serializers.pickle_coders import pmg_pickle_load, pmg_pickle_dump 

from monty.json import MSONable 

from pymatgen.serializers.json_coders import pmg_serialize 

from pymatgen.core.units import Memory 

from . import wrappers 

from .nodes import Status, Node, NodeError, NodeResults, Dependency, GarbageCollector, check_spectator 

from .tasks import ScfTask, DdkTask, DdeTask, TaskManager, FixQueueCriticalError 

from .utils import File, Directory, Editor 

from .abiinspect import yaml_read_irred_perts 

from .works import NodeContainer, Work, BandStructureWork, PhononWork, BecWork, G0W0Work, QptdmWork 

 

 

import logging 

logger = logging.getLogger(__name__) 

 

__author__ = "Matteo Giantomassi" 

__copyright__ = "Copyright 2013, The Materials Project" 

__version__ = "0.1" 

__maintainer__ = "Matteo Giantomassi" 

 

 

__all__ = [ 

"Flow", 

"G0W0WithQptdmFlow", 

"bandstructure_flow", 

"g0w0_flow", 

"phonon_flow", 

] 

 

 

class FlowResults(NodeResults): 

 

JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy() 

#JSON_SCHEMA["properties"] = { 

# "queries": {"type": "string", "required": True}, 

#} 

 

@classmethod 

def from_node(cls, flow): 

"""Initialize an instance from a Work instance.""" 

new = super(FlowResults, cls).from_node(flow) 

 

# Will put all files found in outdir in GridFs 

d = {os.path.basename(f): f for f in flow.outdir.list_filepaths()} 

 

# Add the pickle file. 

d["pickle"] = flow.pickle_file if flow.pickle_protocol != 0 else (flow.pickle_file, "t") 

new.add_gridfs_files(**d) 

 

return new 

 

 

class FlowError(NodeError): 

"""Base Exception for :class:`Node` methods""" 

 

 

class Flow(Node, NodeContainer, MSONable): 

""" 

This object is a container of works. Its main task is managing the

possible inter-dependencies among the works and the creation of

dynamic workflows that are generated by callbacks registered by the user.

 

.. attributes:: 

 

creation_date: String with the creation_date 

pickle_protocol: Protocol for Pickle database (default: -1 i.e. latest protocol) 

 

Important methods for constructing flows: 

 

.. methods:: 

 

register_work: register (add) a work to the flow 

register_task: register a work that contains only this task and return the work

allocate: propagate the workdir and manager of the flow to all the registered tasks

build: create the directories and files of the flow

build_and_pickle_dump: build the flow and save its status to the pickle file

""" 

VERSION = "0.1" 

PICKLE_FNAME = "__AbinitFlow__.pickle" 

 

Error = FlowError 

 

Results = FlowResults 

 

@classmethod 

def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=-1, task_class=ScfTask, work_class=Work): 

""" 

Construct a simple flow from a list of inputs. The flow contains a single Work with 

tasks whose class is given by task_class. 

 

.. warning:: 

 

Don't use this interface if you have dependencies among the tasks. 

 

Args: 

workdir: String specifying the directory where the works will be produced. 

inputs: List of inputs. 

manager: :class:`TaskManager` object responsible for the submission of the jobs. 

If manager is None, the object is initialized from the yaml file 

located either in the working directory or in the user configuration dir. 

pickle_protocol: Pickle protocol version used for saving the status of the object.

-1 denotes the latest version supported by the python interpreter. 

task_class: The class of the :class:`Task`. 

work_class: The class of the :class:`Work`. 

""" 

if not isinstance(inputs, (list, tuple)): inputs = [inputs] 

 

flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol) 

work = work_class() 

for inp in inputs: 

work.register(inp, task_class=task_class) 

flow.register_work(work) 

 

return flow.allocate() 
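
# Illustrative example (comment only): `scf_inputs` is a hypothetical list of
# AbinitInput objects; each input becomes a ScfTask inside a single Work.
#
#     flow = Flow.from_inputs("flow_scf", inputs=scf_inputs)
#     flow.build_and_pickle_dump()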

 

@classmethod 

def as_flow(cls, obj): 

"""Convert obj into a Flow. Accepts filepath, dict, or Flow object.""" 

if isinstance(obj, cls): return obj 

if is_string(obj): 

return cls.pickle_load(obj) 

elif isinstance(obj, collections.Mapping): 

return cls.from_dict(obj) 

else: 

raise TypeError("Don't know how to convert type %s into a Flow" % type(obj)) 

 

def __init__(self, workdir, manager=None, pickle_protocol=-1): 

""" 

Args: 

workdir: String specifying the directory where the works will be produced. 

if workdir is None, the initialization of the working directory 

is performed by flow.allocate(workdir). 

manager: :class:`TaskManager` object responsible for the submission of the jobs. 

If manager is None, the object is initialized from the yaml file 

located either in the working directory or in the user configuration dir. 

pickle_protocol: Pickle protocol version used for saving the status of the object.

-1 denotes the latest version supported by the python interpreter. 

""" 

super(Flow, self).__init__() 

 

if workdir is not None: 

self.set_workdir(workdir) 

 

self.creation_date = time.asctime() 

 

if manager is None: manager = TaskManager.from_user_config() 

self.manager = manager.deepcopy() 

 

# List of works. 

self._works = [] 

 

self._waited = 0 

 

# List of callbacks that must be executed when the dependencies reach S_OK 

self._callbacks = [] 

 

# Install default list of handlers at the flow level. 

# Users can override the default list by calling flow.install_event_handlers in the script. 

# Example: 

# 

# # flow level (common case) 

# flow.install_event_handlers(handlers=my_handlers) 

# 

# # task level (advanced mode) 

# flow[0][0].install_event_handlers(handlers=my_handlers) 

# 

self.install_event_handlers() 

 

self.pickle_protocol = int(pickle_protocol) 

 

# ID used to access mongodb 

self._mongo_id = None 

 

# Save the location of the script used to generate the flow. 

# This trick won't work if we are running with nosetests, py.test etc 

pyfile = find_top_pyfile() 

if "python" in pyfile or "ipython" in pyfile: pyfile = "<" + pyfile + ">" 

self.set_pyfile(pyfile) 

 

# TODO 

# Signal slots: a dictionary with the list 

# of callbacks indexed by node_id and SIGNAL_TYPE. 

# When the node changes its status, it broadcast a signal. 

# The flow is listening to all the nodes of the calculation 

# [node_id][SIGNAL] = list_of_signal_handlers 

#self._sig_slots = slots = {} 

#for work in self: 

# slots[work] = {s: [] for s in work.S_ALL} 

 

#for task in self.iflat_tasks(): 

# slots[task] = {s: [] for s in work.S_ALL} 

 

@pmg_serialize 

def as_dict(self, **kwargs): 

""" 

JSON serialization, note that we only need to save 

a string with the working directory since the object will be 

reconstructed from the pickle file located in workdir 

""" 

return {"workdir": self.workdir} 

 

# This is needed for fireworks. 

to_dict = as_dict 

 

@classmethod 

def from_dict(cls, d, **kwargs): 

"""Reconstruct the flow from the pickle file.""" 

return cls.pickle_load(d["workdir"], **kwargs) 

 

def set_workdir(self, workdir, chroot=False): 

""" 

Set the working directory. Cannot be set more than once unless chroot is True 

""" 

if not chroot and hasattr(self, "workdir") and self.workdir != workdir: 

raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir)) 

 

# Directories with (input|output|temporary) data. 

self.workdir = os.path.abspath(workdir) 

self.indir = Directory(os.path.join(self.workdir, "indata")) 

self.outdir = Directory(os.path.join(self.workdir, "outdata")) 

self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata")) 

 

@classmethod 

def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False): 

""" 

Loads the object from a pickle file and performs initial setup. 

 

Args: 

filepath: Filename or directory name. If filepath is a directory, we

scan the directory tree starting from filepath and we

read the first pickle database found. A RuntimeError is raised if multiple

databases are found.

spectator_mode: If True, the nodes of the flow are not connected by signals. 

This option is usually used when we want to read a flow 

in read-only mode and we want to avoid callbacks that can change the flow. 

remove_lock: 

True to remove the file lock if any (use it carefully). 

""" 

if os.path.isdir(filepath): 

# Walk through each directory inside path and find the pickle database. 

for dirpath, dirnames, filenames in os.walk(filepath): 

fnames = [f for f in filenames if f == cls.PICKLE_FNAME] 

if fnames: 

if len(fnames) == 1: 

filepath = os.path.join(dirpath, fnames[0]) 

break # Exit os.walk 

else: 

err_msg = "Found multiple databases:\n %s" % str(fnames) 

raise RuntimeError(err_msg) 

else: 

err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath) 

raise ValueError(err_msg) 

 

if remove_lock and os.path.exists(filepath + ".lock"): 

try: 

os.remove(filepath + ".lock") 

except: 

pass 

 

with FileLock(filepath): 

with open(filepath, "rb") as fh: 

flow = pmg_pickle_load(fh) 

 

# Check if versions match. 

if flow.VERSION != cls.VERSION: 

msg = ("File flow version %s != latest version %s\n." 

"Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION)) 

warnings.warn(msg) 

 

flow.set_spectator_mode(spectator_mode) 

 

# Recompute the status of each task since tasks that 

# have been submitted previously might be completed. 

flow.check_status() 

return flow 
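
# Illustrative example (comment only): reload a flow from its working directory
# (or directly from the pickle file) and print its status.
#
#     flow = Flow.pickle_load("flow_example")
#     flow.show_status()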

 

@classmethod 

def pickle_loads(cls, s): 

"""Reconstruct the flow from a string.""" 

strio = StringIO() 

strio.write(s) 

strio.seek(0) 

flow = pmg_pickle_load(strio) 

return flow 

 

def __len__(self): 

return len(self.works) 

 

def __iter__(self): 

return self.works.__iter__() 

 

def __getitem__(self, slice): 

return self.works[slice] 

 

def set_pyfile(self, pyfile): 

""" 

Set the path of the python script used to generate the flow. 

 

.. Example: 

 

flow.set_pyfile(__file__) 

""" 

# TODO: Could use a frame hack to get the caller outside abinitio 

# so that pyfile is automatically set when we __init__ it! 

self._pyfile = os.path.abspath(pyfile) 

 

@property 

def pyfile(self): 

""" 

Absolute path of the python script used to generate the flow. Set by `set_pyfile` 

""" 

try: 

return self._pyfile 

except AttributeError: 

return None 

 

@property 

def pid_file(self): 

"""The path of the pid file created by PyFlowScheduler.""" 

return os.path.join(self.workdir, "_PyFlowScheduler.pid") 

 

def check_pid_file(self): 

""" 

This function checks if we are already running the :class:`Flow` with a :class:`PyFlowScheduler`. 

Raises: Flow.Error if the pid file of the scheduler exists.

""" 

if not os.path.exists(self.pid_file): 

return 0 

 

self.show_status() 

raise self.Error("""\n\ 

pid_file 

%s 

already exists. There are two possibilities: 

 

1) There's another instance of PyFlowScheduler running

2) The previous scheduler didn't exit in a clean way 

 

To solve case 1: 

Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file) 

Then you can restart the new scheduler. 

 

To solve case 2: 

Remove the pid_file and restart the scheduler. 

 

Exiting""" % self.pid_file) 

 

@property 

def pickle_file(self): 

"""The path of the pickle file.""" 

return os.path.join(self.workdir, self.PICKLE_FNAME) 

 

@property 

def mongo_id(self): 

return self._mongo_id 

 

@mongo_id.setter 

def mongo_id(self, value): 

if self.mongo_id is not None: 

raise RuntimeError("Cannot change mongo_id %s" % self.mongo_id) 

self._mongo_id = value 

 

def mongodb_upload(self, **kwargs): 

from abiflows.core.scheduler import FlowUploader 

FlowUploader().upload(self, **kwargs) 

 

def validate_json_schema(self): 

"""Validate the JSON schema. Return list of errors.""" 

errors = [] 

 

for work in self: 

for task in work: 

if not task.get_results().validate_json_schema(): 

errors.append(task) 

if not work.get_results().validate_json_schema(): 

errors.append(work) 

if not self.get_results().validate_json_schema(): 

errors.append(self) 

 

return errors 

 

def get_mongo_info(self): 

""" 

Return a JSON dictionary with information on the flow. 

Mainly used for constructing the info section in `FlowEntry`. 

The default implementation is empty. Subclasses must implement it 

""" 

return {} 

 

def mongo_assimilate(self): 

""" 

This function is called by client code when the flow is completed 

Return a JSON dictionary with the most important results produced 

by the flow. The default implementation is empty. Subclasses must implement it 

""" 

return {} 

 

@property 

def works(self): 

"""List of :class:`Work` objects contained in self..""" 

return self._works 

 

@property 

def all_ok(self): 

"""True if all the tasks in works have reached `S_OK`.""" 

return all(work.all_ok for work in self) 

 

@property 

def num_tasks(self): 

"""Total number of tasks""" 

return len(list(self.iflat_tasks())) 

 

@property 

def errored_tasks(self): 

"""List of errored tasks.""" 

etasks = [] 

for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]: 

etasks.extend(list(self.iflat_tasks(status=status))) 

 

return set(etasks) 

 

@property 

def num_errored_tasks(self): 

"""The number of tasks whose status is `S_ERROR`.""" 

return len(self.errored_tasks) 

 

@property 

def unconverged_tasks(self): 

"""List of unconverged tasks.""" 

return list(self.iflat_tasks(status=self.S_UNCONVERGED)) 

 

@property 

def num_unconverged_tasks(self): 

"""The number of tasks whose status is `S_UNCONVERGED`.""" 

return len(self.unconverged_tasks) 

 

@property 

def status_counter(self): 

""" 

Returns a :class:`Counter` object that counts the number of tasks with 

given status (use the string representation of the status as key). 

""" 

# Count the number of tasks with given status in each work. 

counter = self[0].status_counter 

for work in self[1:]: 

counter += work.status_counter 

 

return counter 

 

@property 

def ncores_reserved(self): 

""" 

Returns the number of cores reserved in this moment. 

A core is reserved if the task is not running but 

we have submitted the task to the queue manager. 

""" 

return sum(work.ncores_reserved for work in self) 

 

@property 

def ncores_allocated(self): 

""" 

Returns the number of cores allocated in this moment. 

A core is allocated if it's running a task or if we have 

submitted a task to the queue manager but the job is still pending. 

""" 

return sum(work.ncores_allocated for work in self) 

 

@property 

def ncores_used(self): 

""" 

Returns the number of cores used in this moment. 

A core is used if there's a job that is running on it. 

""" 

return sum(work.ncores_used for work in self) 

 

@property 

def has_chrooted(self): 

""" 

Returns a string that evaluates to True if we have changed 

the workdir for visualization purposes, e.g. we are using sshfs

to mount the remote directory where the `Flow` is located.

The string gives the previous workdir of the flow. 

""" 

try: 

return self._chrooted_from 

except AttributeError: 

return "" 

 

def chroot(self, new_workdir): 

""" 

Change the workdir of the :class:`Flow`. Mainly used for

allowing the user to open the GUI on the local host 

and access the flow from remote via sshfs. 

 

.. note:: 

Calling this method will make the flow go into read-only mode.

""" 

self._chrooted_from = self.workdir 

self.set_workdir(new_workdir, chroot=True) 

 

for i, work in enumerate(self): 

new_wdir = os.path.join(self.workdir, "w" + str(i)) 

work.chroot(new_wdir) 
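
# Illustrative example (comment only): inspect on the local machine a flow whose
# workdir lives on a cluster mounted via sshfs. The paths are hypothetical.
#
#     flow = Flow.pickle_load("/home/user/mnt_cluster/flow_example")
#     flow.chroot("/home/user/mnt_cluster/flow_example")   # flow becomes read-only
#     flow.show_status()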

 

def groupby_status(self): 

""" 

Returns an ordered dictionary mapping the task status to

the list of named tuples (task, work_index, task_index). 

""" 

Entry = collections.namedtuple("Entry", "task wi ti") 

d = collections.defaultdict(list) 

 

for task, wi, ti in self.iflat_tasks_wti(): 

d[task.status].append(Entry(task, wi, ti)) 

 

# Sort keys according to their status. 

return OrderedDict([(k, d[k]) for k in sorted(list(d.keys()))]) 
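
# Illustrative example (comment only): count the tasks per status using the
# ordered dictionary returned by groupby_status.
#
#     for status, entries in flow.groupby_status().items():
#         print(status, "-->", len(entries), "task(s)")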

 

def groupby_task_class(self): 

""" 

Returns a dictionary mapping the task class to the list of tasks in the flow 

""" 

# Find all Task classes 

class2tasks = OrderedDict() 

for task in self.iflat_tasks(): 

cls = task.__class__ 

if cls not in class2tasks: class2tasks[cls] = [] 

class2tasks[cls].append(task) 

 

return class2tasks 

 

def iflat_nodes(self, status=None, op="==", nids=None): 

""" 

Generator that produces a flat sequence of nodes.

If status is not None, only the nodes with the specified status are selected.

nids is an optional list of node identifiers used to filter the nodes. 

""" 

nids = as_set(nids) 

 

if status is None: 

if not (nids and self.node_id not in nids): 

yield self 

 

for work in self: 

if nids and work.node_id not in nids: continue 

yield work 

for task in work: 

if nids and task.node_id not in nids: continue 

yield task 

else: 

# Get the operator from the string. 

op = operator_from_str(op) 

 

# Accept Task.S_FLAG or string. 

status = Status.as_status(status) 

 

if not (nids and self.node_id not in nids): 

if op(self.status, status): yield self 

 

for wi, work in enumerate(self): 

if nids and work.node_id not in nids: continue 

if op(work.status, status): yield work 

 

for ti, task in enumerate(work): 

if nids and task.node_id not in nids: continue 

if op(task.status, status): yield task 

 

def node_from_nid(self, nid): 

"""Return the node in the `Flow` with the given `nid` identifier""" 

for node in self.iflat_nodes(): 

if node.node_id == nid: return node 

raise ValueError("Cannot find node with node id: %s" % nid) 

 

def iflat_tasks_wti(self, status=None, op="==", nids=None): 

""" 

Generator to iterate over all the tasks of the `Flow`. 

Yields: 

 

(task, work_index, task_index) 

 

If status is not None, only the tasks whose status satisfies 

the condition (task.status op status) are selected 

status can be either one of the flags defined in the :class:`Task` class 

(e.g Task.S_OK) or a string e.g "S_OK" 

nids is an optional list of node identifiers used to filter the tasks. 

""" 

return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True) 

 

def iflat_tasks(self, status=None, op="==", nids=None): 

""" 

Generator to iterate over all the tasks of the :class:`Flow`. 

 

If status is not None, only the tasks whose status satisfies 

the condition (task.status op status) are selected 

status can be either one of the flags defined in the :class:`Task` class 

(e.g Task.S_OK) or a string e.g "S_OK" 

nids is an optional list of node identifiers used to filter the tasks. 

""" 

return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False) 
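
# Illustrative examples (comment only) of the status/op/nids filters described above:
#
#     ok_tasks = list(flow.iflat_tasks(status="S_OK"))            # completed tasks
#     not_ok   = list(flow.iflat_tasks(status="S_OK", op="!="))   # everything else
#     subset   = list(flow.iflat_tasks(nids=[12, 13]))            # filter by node ids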

 

def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True): 

""" 

Generator that produces a flat sequence of tasks.

If status is not None, only the tasks with the specified status are selected.

nids is an optional list of node identifiers used to filter the tasks. 

 

Returns: 

(task, work_index, task_index) if with_wti is True else task 

""" 

nids = as_set(nids) 

 

if status is None: 

for wi, work in enumerate(self): 

for ti, task in enumerate(work): 

if nids and task.node_id not in nids: continue 

if with_wti: 

yield task, wi, ti 

else: 

yield task 

 

else: 

# Get the operator from the string. 

op = operator_from_str(op) 

 

# Accept Task.S_FLAG or string. 

status = Status.as_status(status) 

 

for wi, work in enumerate(self): 

for ti, task in enumerate(work): 

if nids and task.node_id not in nids: continue 

if op(task.status, status): 

if with_wti: 

yield task, wi, ti 

else: 

yield task 

 

def show_inpvars(self, *varnames): 

from abipy.htc.variable import InputVariable 

lines = [] 

app = lines.append 

 

for task in self.iflat_tasks(): 

app(str(task)) 

for name in varnames: 

value = task.input.get(name) 

app(str(InputVariable(name, value))) 

 

return "\n".join(lines) 

 

def abivalidate_inputs(self): 

""" 

Run ABINIT in dry mode to validate all the inputs of the flow. 

 

Return: 

(isok, tuples) 

 

isok is True if all inputs are ok. 

tuples is a list of `namedtuple` objects, one for each task in the flow.

Each namedtuple has the following attributes: 

 

retcode: Return code. 0 if OK. 

log_file: log file of the Abinit run, use log_file.read() to access its content. 

stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content. 

 

Raises: 

`RuntimeError` if executable is not in $PATH. 

""" 

if not self.allocated: 

self.build() 

#self.build_and_pickle_dump() 

 

isok, tuples = True, [] 

for task in self.iflat_tasks(): 

t = task.input.abivalidate() 

if t.retcode != 0: isok = False 

tuples.append(t) 

 

return isok, tuples 
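
# Illustrative example (comment only): validate all the inputs before submission
# and print the log file of the runs that failed the dry-run check.
#
#     isok, results = flow.abivalidate_inputs()
#     if not isok:
#         for res in results:
#             if res.retcode != 0: print(res.log_file.read())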

 

def check_dependencies(self): 

"""Test the dependencies of the nodes for possible deadlocks.""" 

deadlocks = [] 

 

for task in self.iflat_tasks(): 

for dep in task.deps: 

if dep.node.depends_on(task): 

deadlocks.append((task, dep.node)) 

 

if deadlocks: 

lines = ["Detect wrong list of dependecies that will lead to a deadlock:"] 

lines.extend(["%s <--> %s" % nodes for nodes in deadlocks]) 

raise RuntimeError("\n".join(lines)) 

 

def find_deadlocks(self): 

""" 

This function detects deadlocks.

 

Return: 

named tuple with the tasks grouped in: deadlocked, runnables, running

""" 

# Find jobs that can be submitted and the jobs that are already in the queue.

runnables = [] 

for work in self: 

runnables.extend(work.fetch_alltasks_to_run()) 

runnables.extend(list(self.iflat_tasks(status=self.S_SUB))) 

 

# Running jobs. 

running = list(self.iflat_tasks(status=self.S_RUN)) 

 

# Find deadlocks. 

err_tasks = self.errored_tasks 

deadlocked = [] 

if err_tasks: 

for task in self.iflat_tasks(): 

if any(task.depends_on(err_task) for err_task in err_tasks): 

deadlocked.append(task) 

 

return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running) 
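
# Illustrative example (comment only): use the named tuple returned above to
# decide whether the scheduler can still make progress.
#
#     g = flow.find_deadlocks()
#     if g.deadlocked and not (g.runnables or g.running):
#         print("The flow is stuck: no runnable or running task left.")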

 

def check_status(self, **kwargs): 

""" 

Check the status of the works in self. 

 

Args: 

show: True to show the status of the flow. 

kwargs: keyword arguments passed to show_status 

""" 

for work in self: 

work.check_status() 

 

if kwargs.pop("show", False): 

self.show_status(**kwargs) 

 

@property 

def status(self): 

"""The status of the :class:`Flow` i.e. the minimum of the status of its tasks and its works""" 

return min(work.get_all_status(only_min=True) for work in self) 

 

#def restart_unconverged_tasks(self, max_nlauch, excs): 

# nlaunch = 0 

# for task in self.unconverged_tasks: 

# try: 

# logger.info("Flow will try restart task %s" % task) 

# fired = task.restart() 

# if fired: 

# nlaunch += 1 

# max_nlaunch -= 1 

 

# if max_nlaunch == 0: 

# logger.info("Restart: too many jobs in the queue, returning") 

# self.pickle_dump() 

# return nlaunch, max_nlaunch 

 

# except task.RestartError: 

# excs.append(straceback()) 

 

# return nlaunch, max_nlaunch 

 

def fix_abicritical(self): 

""" 

This function tries to fix critical events originating from ABINIT. 

Returns the number of tasks that have been fixed. 

""" 

count = 0 

for task in self.iflat_tasks(status=self.S_ABICRITICAL): 

count += task.fix_abicritical() 

 

return count 

 

def fix_queue_critical(self): 

""" 

This function tries to fix critical events originating from the queue submission system. 

 

Returns the number of tasks that have been fixed. 

""" 

count = 0 

for task in self.iflat_tasks(status=self.S_QCRITICAL): 

logger.info("Will try to fix task %s" % str(task)) 

try: 

task.fix_queue_critical() 

count += 1 

except FixQueueCriticalError: 

logger.info("Not able to fix task %s" % task) 

 

return count 

 

def show_info(self, **kwargs): 

"""Print info on the flow i.e. total number of tasks, works, tasks grouped by class.""" 

stream = kwargs.pop("stream", sys.stdout) 

 

lines = [str(self)] 

app = lines.append 

 

app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks) ) 

app("Number of tasks with a given class:") 

 

# Build Table 

data = [[cls.__name__, len(tasks)] 

for cls, tasks in self.groupby_task_class().items()] 

app(str(tabulate(data, headers=["Task Class", "Number"]))) 

 

stream.write("\n".join(lines)) 

 

def show_summary(self, **kwargs): 

""" 

Print a short summary with the status of the flow and a counter task_status --> number_of_tasks 

 

Args: 

stream: File-like object, Default: sys.stdout 

""" 

stream = kwargs.pop("stream", sys.stdout) 

 

lines = ["%s, num_tasks=%s, all_ok=%s" % (str(self), self.num_tasks, self.all_ok)] 

for k, v in self.status_counter.items(): 

lines.append(" %s: %s" % (k, v)) 

lines.append("") 

 

stream.write("\n".join(lines)) 

 

def show_status(self, **kwargs): 

""" 

Report the status of the works and the status of the different tasks on the specified stream. 

 

Args: 

stream: File-like object, Default: sys.stdout 

nids: List of node identifiers. By default all nodes are shown

wslice: Slice object used to select works. 

verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized. 

""" 

stream = kwargs.pop("stream", sys.stdout) 

nids = as_set(kwargs.pop("nids", None)) 

wslice = kwargs.pop("wslice", None) 

wlist = None 

if wslice is not None: 

# Convert range to list of work indices. 

wlist = list(range(wslice.start, wslice.stop, wslice.step))

verbose = kwargs.pop("verbose", 0) 

 

#has_colours = stream_has_colours(stream) 

has_colours = True 

red = "red" if has_colours else None 

 

for i, work in enumerate(self): 

print("", file=stream) 

cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream) 

if wlist is not None and i not in wlist: continue

if verbose == 0 and work.finalized: 

print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream) 

continue 

 

headers = ["Task", "Status", "Queue", "MPI|Omp|Gb", 

"Err|Warn|Com", "Class", "Rest|Sub|Corr", "Time", 

"Node_ID"] 

table = [] 

tot_num_errors = 0 

for task in work: 

if nids and task.node_id not in nids: continue 

task_name = os.path.basename(task.name) 

 

# FIXME: This should not be done here. 

# get_event_report should be called only in check_status 

# Parse the events in the main output. 

report = task.get_event_report() 

 

# Get time info (run-time or time in queue or None) 

stime = None 

timedelta = task.datetimes.get_runtime() 

if timedelta is not None: 

stime = str(timedelta) + "R" 

else: 

timedelta = task.datetimes.get_time_inqueue() 

if timedelta is not None: 

stime = str(timedelta) + "Q" 

 

events = "|".join(3*["NA"]) 

if report is not None: 

#events = "%03.d"|".join(map(str, [report.num_errors, report.num_warnings, report.num_comments])) 

events = '{:>3}|{:>4}|{:>3}'.format(*map(str, (report.num_errors, report.num_warnings, report.num_comments))) 

 

#para_info = "|".join(map(str, (task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb")))) 

para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb")))) 

 

task_info = list(map(str, [task.__class__.__name__, 

(task.num_restarts, task.num_launches, task.num_corrections), stime, task.node_id])) 

 

qinfo = "None" 

if task.queue_id is not None: 

qinfo = str(task.queue_id) + "@" + str(task.qname) 

 

if task.status.is_critical: 

tot_num_errors += 1 

task_name = colored(task_name, red) 

 

if has_colours: 

table.append([task_name, task.status.colored, qinfo, 

para_info, events] + task_info) 

else: 

table.append([task_name, str(task.status), qinfo,

para_info, events] + task_info)

 

# Print table and write colorized line with the total number of errors. 

print(tabulate(table, headers=headers), file=stream) 

if tot_num_errors: 

cprint("Total number of errors: %d" % tot_num_errors, red, file=stream) 

print("", file=stream) 

 

if self.all_ok: 

print("\nall_ok reached\n", file=stream) 

 

def show_inputs(self, nids=None, wslice=None, stream=sys.stdout): 

""" 

Print the input of the tasks to the given stream. 

 

Args: 

stream: 

File-like object, Default: sys.stdout 

nids: 

List of node identifiers. By default all nodes are shown

wslice: Slice object used to select works. 

""" 

lines = [] 

for task in self.select_tasks(nids=nids, wslice=wslice): 

s = task.make_input(with_header=True) 

 

# Add info on dependencies. 

if task.deps: 

s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps) 

else: 

s += "\n\nDependencies: None" 

 

lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n") 

 

stream.writelines(lines) 

 

def listext(self, ext, stream=sys.stdout): 

""" 

Print to the given `stream` a table with the list of the output files 

with the given `ext` produced by the flow. 

""" 

nodes_files = [] 

for node in self.iflat_nodes(): 

filepath = node.outdir.has_abiext(ext) 

if filepath: 

nodes_files.append((node, File(filepath))) 

 

if nodes_files: 

print("Found %s files with extension %s produced by the flow" % (len(nodes_files), ext), file=stream) 

 

table = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024**2), 

node.node_id, node.__class__.__name__] 

for node, f in nodes_files] 

print(tabulate(table, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream) 

 

else: 

print("No output file with extension %s has been produced by the flow" % ext, file=stream) 

 

def select_tasks(self, nids=None, wslice=None): 

""" 

Return a list with a subset of tasks. 

 

Args: 

nids: List of node identifiers. 

wslice: Slice object used to select works. 

 

.. note:: 

 

nids and wslice are mutually exclusive. 

If no argument is provided, the full list of tasks is returned. 

""" 

if nids is not None: 

assert wslice is None 

tasks = self.tasks_from_nids(nids) 

 

elif wslice is not None: 

tasks = [] 

for work in self[wslice]: 

tasks.extend([t for t in work]) 

else: 

# All tasks selected if no option is provided. 

tasks = list(self.iflat_tasks()) 

 

return tasks 
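
# Illustrative examples (comment only): nids and wslice are mutually exclusive.
#
#     tasks = flow.select_tasks(nids=[5, 6])         # pick tasks by node identifier
#     tasks = flow.select_tasks(wslice=slice(0, 2))  # all tasks in the first two works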

 

def inspect(self, nids=None, wslice=None, **kwargs): 

""" 

Inspect the tasks (SCF iterations, Structural relaxation ...) and 

produces matplotlib plots. 

 

Args: 

nids: List of node identifiers. 

wslice: Slice object used to select works. 

kwargs: keyword arguments passed to `task.inspect` method. 

 

.. note:: 

 

nids and wslice are mutually exclusive. 

If nids and wslice are both None, all tasks in self are inspected. 

 

Returns: 

List of `matplotlib` figures. 

""" 

figs = [] 

for task in self.select_tasks(nids=nids, wslice=wslice): 

if hasattr(task, "inspect"): 

fig = task.inspect(**kwargs) 

if fig is None: 

cprint("Cannot inspect Task %s" % task, color="blue") 

else: 

figs.append(fig) 

else: 

cprint("Task %s does not provide an inspect method" % task, color="blue") 

 

return figs 

 

def get_results(self, **kwargs): 

results = self.Results.from_node(self) 

results.update(self.get_dict_for_mongodb_queries()) 

return results 

 

def get_dict_for_mongodb_queries(self): 

""" 

This function returns a dictionary with the attributes that will be 

put in the mongodb document to facilitate the query. 

Subclasses may want to replace or extend the default behaviour. 

""" 

d = {} 

return d 

# TODO 

all_structures = [task.input.structure for task in self.iflat_tasks()] 

all_pseudos = [task.input.pseudos for task in self.iflat_tasks()] 

 

def look_before_you_leap(self): 

""" 

This method should be called before running the calculation to make 

sure that the most important requirements are satisfied. 

 

Return: 

A string with the inconsistencies/errors found (empty string if no problems are detected).

""" 

errors = [] 

 

try: 

self.check_dependencies() 

except self.Error as exc: 

errors.append(str(exc)) 

 

if self.has_db: 

try: 

self.manager.db_connector.get_collection() 

except Exception as exc: 

errors.append(""" 

ERROR while trying to connect to the MongoDB database: 

Exception: 

%s 

Connector: 

%s 

""" % (exc, self.manager.db_connector)) 

 

return "\n".join(errors) 

 

@property 

def has_db(self): 

"""True if flow uses `MongoDB` to store the results.""" 

return self.manager.has_db 

 

def db_insert(self): 

""" 

Insert results in the `MongoDB` database.

""" 

assert self.has_db 

# Connect to MongoDb and get the collection. 

coll = self.manager.db_connector.get_collection() 

print("Mongodb collection %s with count %d", coll, coll.count()) 

 

start = time.time() 

for work in self: 

for task in work: 

results = task.get_results() 

pprint(results) 

results.update_collection(coll) 

results = work.get_results() 

pprint(results) 

results.update_collection(coll) 

print("MongoDb update done in %s [s]" % time.time() - start) 

 

results = self.get_results() 

pprint(results) 

results.update_collection(coll) 

 

# Update the pickle file to save the mongo ids. 

self.pickle_dump() 

 

for d in coll.find(): 

pprint(d) 

 

def tasks_from_nids(self, nids): 

""" 

Return the list of tasks associated to the given list of node identifiers (nids). 

 

.. note:: 

 

Invalid ids are ignored 

""" 

if not isinstance(nids, collections.Iterable): nids = [nids] 

 

tasks = [] 

for nid in nids: 

for task in self.iflat_tasks(): 

if task.node_id == nid: 

tasks.append(task) 

break 

 

return tasks 

 

def wti_from_nids(self, nids): 

"""Return the list of (w, t) indices from the list of node identifiers nids.""" 

return [task.pos for task in self.tasks_from_nids(nids)] 

 

def open_files(self, what="o", status=None, op="==", nids=None, editor=None): 

""" 

Open the files of the flow inside an editor (command line interface). 

 

Args: 

what: string with the list of characters selecting the file type 

Possible choices: 

i ==> input_file, 

o ==> output_file, 

f ==> files_file, 

j ==> job_file, 

l ==> log_file, 

e ==> stderr_file, 

q ==> qout_file, 

all ==> all files. 

status: if not None, only the tasks with this status are selected

op: status operator. Requires status. A task is selected 

if task.status op status evaluates to true. 

nids: optional list of node identifiers used to filter the tasks. 

editor: Select the editor. None to use the default editor ($EDITOR shell env var) 

""" 

# Build list of files to analyze. 

files = [] 

for task in self.iflat_tasks(status=status, op=op, nids=nids): 

lst = task.select_files(what) 

if lst: 

files.extend(lst) 

 

return Editor(editor=editor).edit_files(files) 
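
# Illustrative example (comment only): open the log (l) and stderr (e) files of
# all the tasks that are not S_OK in the default $EDITOR.
#
#     flow.open_files(what="le", status="S_OK", op="!=")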

 

def get_abitimer(self, nids=None): 

""" 

Parse the timer data in the main output file(s) of Abinit. 

 

Args: 

nids: optional list of node identifiers used to filter the tasks. 

 

Return: :class:`AbinitTimerParser` instance, None if error. 

""" 

# Get the list of output files according to nids. 

paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)] 

 

# Parse data. 

from .abitimer import AbinitTimerParser 

parser = AbinitTimerParser() 

read_ok = parser.parse(paths) 

if read_ok: 

return parser 

return None 

 

def show_abierrors(self, nids=None, stream=sys.stdout): 

""" 

Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL. 

 

Args: 

nids: optional list of node identifiers used to filter the tasks. 

stream: File-like object. Default: sys.stdout 

""" 

lines = [] 

app = lines.append 

 

for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids): 

header = "=== " + task.qout_file.path + "===" 

app(header) 

report = task.get_event_report() 

 

if report is not None: 

app("num_errors: %s, num_warnings: %s, num_comments: %s" % ( 

report.num_errors, report.num_warnings, report.num_comments)) 

 

app("*** ERRORS ***") 

app("\n".join(str(e) for e in report.errors)) 

 

app("*** BUGS ***") 

app("\n".join(str(b) for b in report.bugs)) 

 

else: 

app("get_envent_report returned None!") 

 

 

app("=" * len(header) + 2*"\n") 

 

return stream.writelines(lines) 

 

def show_qouts(self, nids=None, stream=sys.stdout): 

""" 

Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL. 

 

Args: 

nids: optional list of node identifiers used to filter the tasks. 

stream: File-like object. Default: sys.stdout 

""" 

lines = [] 

 

for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids): 

header = "=== " + task.qout_file.path + "===" 

lines.append(header) 

if task.qout_file.exists: 

with open(task.qout_file.path, "rt") as fh: 

lines += fh.readlines() 

else: 

lines.append("File does not exist!") 

 

lines.append("=" * len(header) + 2*"\n") 

 

return stream.writelines(lines) 

 

def cancel(self, nids=None): 

""" 

Cancel all the tasks that are in the queue. 

nids is an optional list of node identifiers used to filter the tasks. 

 

Returns: 

Number of jobs cancelled, negative value if error 

""" 

if self.has_chrooted: 

# TODO: Use paramiko to kill the job? 

warnings.warn("Cannot cancel the flow via sshfs!") 

return -1 

 

# If we are running with the scheduler, we must send a SIGKILL signal. 

if os.path.exists(self.pid_file): 

print("Found scheduler attached to this flow.") 

print("Will send SIGKILL to the scheduler before cancelling the tasks!") 

 

with open(self.pid_file, "r") as fh: 

pid = int(fh.readline()) 

 

retcode = os.system("kill -9 %d" % pid) 

self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode) 

try: 

os.remove(self.pid_file) 

except IOError: 

pass 

 

num_cancelled = 0 

for task in self.iflat_tasks(nids=nids): 

num_cancelled += task.cancel() 

 

return num_cancelled 

 

def get_njobs_in_queue(self, username=None): 

""" 

Returns the number of jobs in the queue, or None if the number of jobs cannot be determined.

 

Args: 

username: (str) the username of the jobs to count (default is to autodetect) 

""" 

return self.manager.qadapter.get_njobs_in_queue(username=username) 

 

def rmtree(self, ignore_errors=False, onerror=None): 

"""Remove workdir (same API as shutil.rmtree).""" 

if not os.path.exists(self.workdir): return 

shutil.rmtree(self.workdir, ignore_errors=ignore_errors, onerror=onerror) 

 

def rm_and_build(self): 

"""Remove the workdir and rebuild the flow.""" 

self.rmtree() 

self.build() 

 

def build(self, *args, **kwargs): 

"""Make directories and files of the `Flow`.""" 

# Allocate here if not done yet! 

if not self.allocated: self.allocate() 

 

self.indir.makedirs() 

self.outdir.makedirs() 

self.tmpdir.makedirs() 

 

# Check the nodeid file in workdir 

nodeid_path = os.path.join(self.workdir, ".nodeid") 

 

if os.path.exists(nodeid_path): 

with open(nodeid_path, "rt") as fh: 

node_id = int(fh.read()) 

 

if self.node_id != node_id: 

msg = ("\nFound node_id %s in file:\n\n %s\n\nwhile the node_id of the present flow is %d.\n" 

"This means that you are trying to build a new flow in a directory already used by another flow.\n" 

"Possible solutions:\n" 

" 1) Change the workdir of the new flow.\n" 

" 2) remove the old directory either with `rm -rf` or by calling the method flow.rmtree()\n" 

% (node_id, nodeid_path, self.node_id)) 

raise RuntimeError(msg) 

 

else: 

with open(nodeid_path, "wt") as fh: 

fh.write(str(self.node_id)) 

 

for work in self: 

work.build(*args, **kwargs) 

 

def build_and_pickle_dump(self): 

""" 

Build dirs and file of the `Flow` and save the object in pickle format. 

Returns 0 if success 

""" 

self.build() 

return self.pickle_dump() 

 

@check_spectator 

def pickle_dump(self): 

""" 

Save the status of the object in pickle format. 

Returns 0 if success 

""" 

if self.has_chrooted: 

warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted) 

return -1 

 

#if self.in_spectator_mode: 

# warnings.warn("Cannot pickle_dump since flow is in_spectator_mode") 

# return -2 

 

protocol = self.pickle_protocol 

 

# Atomic transaction with FileLock. 

with FileLock(self.pickle_file): 

with AtomicFile(self.pickle_file, mode="wb") as fh: 

pmg_pickle_dump(self, fh, protocol=protocol) 

 

return 0 

 

def pickle_dumps(self, protocol=None): 

""" 

Return a string with the pickle representation. 

`protocol` selects the pickle protocol. self.pickle_protocol is 

used if `protocol` is None 

""" 

strio = StringIO() 

pmg_pickle_dump(self, strio, 

protocol=self.pickle_protocol if protocol is None 

else protocol) 

return strio.getvalue() 

 

def register_task(self, input, deps=None, manager=None, task_class=None): 

""" 

Utility function that generates a `Work` made of a single task 

 

Args: 

input: :class:`AbinitInput` 

deps: List of :class:`Dependency` objects specifying the dependency of this node. 

An empty list of deps implies that this node has no dependencies.

manager: The :class:`TaskManager` responsible for the submission of the task. 

If manager is None, we use the :class:`TaskManager` specified during the creation of the work. 

task_class: Task subclass to instantiate. Default: :class:`AbinitTask` 

 

Returns: 

The generated :class:`Work` for the task, work[0] is the actual task. 

""" 

work = Work(manager=manager) 

task = work.register(input, deps=deps, task_class=task_class) 

self.register_work(work) 

 

return work 

 

def register_work(self, work, deps=None, manager=None, workdir=None): 

""" 

Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies. 

 

Args: 

work: :class:`Work` object. 

deps: List of :class:`Dependency` objects specifying the dependency of this node. 

An empty list of deps implies that this node has no dependencies.

manager: The :class:`TaskManager` responsible for the submission of the task. 

If manager is None, we use the `TaskManager` specified during the creation of the work. 

workdir: The name of the directory used for the :class:`Work`. 

 

Returns: 

The registered :class:`Work`. 

""" 

if getattr(self, "workdir", None) is not None: 

# The flow has a workdir: build the name of the directory of the work. 

work_workdir = None 

if workdir is None: 

work_workdir = os.path.join(self.workdir, "w" + str(len(self))) 

else: 

work_workdir = os.path.join(self.workdir, os.path.basename(workdir)) 

 

work.set_workdir(work_workdir) 

 

if manager is not None: 

work.set_manager(manager) 

 

self.works.append(work) 

 

if deps: 

deps = [Dependency(node, exts) for node, exts in deps.items()] 

work.add_deps(deps) 

 

return work 
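# Usage sketch (illustrative names): deps maps a node to the extension(s) it must provide;
# here the new work waits for the WFK file of the first task of a previously registered work.
#
#   work = flow.register_work(nscf_work, deps={scf_work[0]: "WFK"})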

 

def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None): 

""" 

Registers a callback function that will generate the :class:`Task` of the :class:`Work`. 

 

Args: 

cbk_name: Name of the callback function (must be a bound method of self) 

cbk_data: Additional data passed to the callback function. 

deps: Dictionary mapping nodes to file extensions (e.g. {task: "WFK"}) specifying the dependencies of the work. 

work_class: :class:`Work` class to instantiate. 

manager: The :class:`TaskManager` responsible for the submission of the task. 

If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`. 

 

Returns: 

The :class:`Work` that will be finalized by the callback. 

""" 

# TODO: pass a Work factory instead of a class 

# Directory of the Work. 

work_workdir = os.path.join(self.workdir, "w" + str(len(self))) 

 

# Create an empty work and register the callback 

work = work_class(workdir=work_workdir, manager=manager) 

 

self._works.append(work) 

 

deps = [Dependency(node, exts) for node, exts in deps.items()] 

if not deps: 

raise ValueError("A callback must have deps!") 

 

work.add_deps(deps) 

 

# Wrap the callable in a Callback object and save 

# useful info such as the index of the work and the callback data. 

cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data) 

self._callbacks.append(cbk) 

 

return work 

 

@property 

def allocated(self): 

"""Numer of allocations. Set by `allocate`.""" 

try: 

return self._allocated 

except AttributeError: 

return 0 

 

def allocate(self, workdir=None): 

""" 

Allocate the `Flow` i.e. assign the `workdir` and (optionally) 

the :class:`TaskManager` to the different tasks in the Flow. 

 

Args: 

workdir: Working directory of the flow. Must be specified here 

if we haven't initialized the workdir in the __init__. 

""" 

if workdir is not None: 

# We set the workdir of the flow here 

self.set_workdir(workdir) 

for i, work in enumerate(self): 

work.set_workdir(os.path.join(self.workdir, "w" + str(i))) 

 

if not hasattr(self, "workdir"): 

raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__") 

 

for work in self: 

# Each work has a reference to its flow. 

work.allocate(manager=self.manager) 

work.set_flow(self) 

# Each task has a reference to its work. 

for task in work: 

task.set_work(work) 

 

self.check_dependencies() 

 

if not hasattr(self, "_allocated"): self._allocated = 0 

self._allocated += 1 

 

return self 
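# Usage sketch (illustrative): if the workdir was not passed to __init__, it must be given here.
# "flow_run" is a hypothetical directory name.
#
#   flow.allocate(workdir="flow_run")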

 

def use_smartio(self): 

""" 

This function should be called when the entire `Flow` has been built. 

It tries to reduce the pressure on the hard disk by using Abinit smart-io 

capabilities for those files that are not needed by other nodes. 

Smart-io means that big files (e.g. WFK) are written only if the calculation 

is unconverged so that we can restart from them. No output is produced if 

convergence is achieved. 

""" 

if not self.allocated: 

raise RuntimeError("You must call flow.allocate before invoking flow.use_smartio") 

 

for task in self.iflat_tasks(): 

children = task.get_children() 

if not children: 

# Change the input so that output files are produced 

# only if the calculation is not converged. 

task.history.info("Will disable IO for task") 

task._set_inpvars(prtwf=-1, prtden=0) # TODO: prt1wf=-1, 

else: 

must_produce_abiexts = [] 

for child in children: 

# Collect the extensions that this task must produce for its children. 

for d in child.deps: 

must_produce_abiexts.extend(d.exts) 

 

must_produce_abiexts = set(must_produce_abiexts) 

#print("must_produce_abiexts", must_produce_abiexts) 

 

# Variables supporting smart-io. 

smart_prtvars = { 

"prtwf": "WFK", 

} 

 

# Set the variable to -1 to disable the output 

for varname, abiext in smart_prtvars.items(): 

if abiext not in must_produce_abiexts: 

print("%s: setting %s to -1" % (task, varname)) 

task._set_inpvars({varname: -1}) 

 

#def new_from_input_decorators(self, new_workdir, decorators) 

# """ 

# Return a new :class:`Flow` in which all the Abinit inputs have been 

# decorated by decorators. 

# """ 

# # The tricky part here is how to assign a new id to the new nodes while maintaining the 

# # correct dependencies! The safest approach would be to pass through __init__ 

# # instead of using copy.deepcopy() 

# return flow 

 

def show_dependencies(self, stream=sys.stdout): 

"""Writes to the given stream the ASCII representation of the dependency tree.""" 

def child_iter(node): 

return [d.node for d in node.deps] 

 

def text_str(node): 

return colored(str(node), color=node.status.color_opts["color"]) 

 

for task in self.iflat_tasks(): 

print(draw_tree(task, child_iter, text_str), file=stream) 

 

def on_dep_ok(self, signal, sender): 

# TODO 

# Replace this callback with dynamic dispatch 

# on_all_S_OK for work 

# on_S_OK for task 

logger.info("on_dep_ok with sender %s, signal %s" % (str(sender), signal)) 

 

for i, cbk in enumerate(self._callbacks): 

if not cbk.handle_sender(sender): 

logger.info("%s does not handle sender %s" % (cbk, sender)) 

continue 

 

if not cbk.can_execute(): 

logger.info("Cannot execute %s" % cbk) 

continue 

 

# Execute the callback and disable it 

self.history.info("flow in on_dep_ok: about to execute callback %s" % str(cbk)) 

cbk() 

cbk.disable() 

 

# Update the database. 

self.pickle_dump() 

 

@check_spectator 

def finalize(self): 

""" 

This method is called when the flow is completed. 

Return 0 if success 

""" 

if self.finalized: 

self.history.warning("Calling finalize on an alrady finalized flow.") 

return 1 

 

self.history.warning("Calling flow.finalize.") 

self.finalized = False 

 

if self.has_db: 

self.history.info("Saving results in database.") 

try: 

self.db_insert() 

self.finalized = True 

except Exception: 

logger.critical("MongoDb insertion failed.") 

return 2 

 

# Here we remove the big output files if we have the garbage collector 

# and the policy is set to "flow." 

if self.gc is not None and self.gc.policy == "flow": 

self.history.info("gc.policy set to flow. Will clean task output files.") 

for task in self.iflat_tasks(): 

task.clean_output_files() 

 

return 0 

 

def set_garbage_collector(self, exts=None, policy="task"): 

""" 

Enable the garbage collector that will remove the big output files that are not needed. 

 

Args: 

exts: string or list with the Abinit file extensions to be removed. A default is 

provided if exts is None 

policy: Either `flow` or `task`. If policy is set to 'task', we remove the output 

files as soon as the task reaches S_OK. If 'flow', the files are removed 

only when the flow is finalized. This option should be used when we are dealing 

with a dynamic flow with callbacks generating other tasks since a :class:`Task` 

might not be aware of its children when it reaches S_OK. 

""" 

assert policy in ("task", "flow") 

exts = list_strings(exts) if exts is not None else ("WFK", "SUS", "SCR", "BSR", "BSC") 

 

gc = GarbageCollector(exts=set(exts), policy=policy) 

 

self.set_gc(gc) 

for work in self: 

#work.set_gc(gc) # TODO Add support for Works and flow policy 

for task in work: 

task.set_gc(gc) 
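# Usage sketch (illustrative extensions): remove WFK and SCR files as soon as each task reaches S_OK.
#
#   flow.set_garbage_collector(exts=["WFK", "SCR"], policy="task")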

 

def connect_signals(self): 

""" 

Connect the signals within the `Flow`. 

The `Flow` is responsible for catching the important signals raised from its works. 

""" 

# Connect the signals inside each Work. 

for work in self: 

work.connect_signals() 

 

# Observe the nodes that must reach S_OK in order to call the callbacks. 

for cbk in self._callbacks: 

#cbk.enable() 

for dep in cbk.deps: 

logger.info("connecting %s \nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK)) 

dispatcher.connect(self.on_dep_ok, signal=dep.node.S_OK, sender=dep.node, weak=False) 

 

# Associate to each signal the callback _on_signal 

# (a bound method of the node) that will be called by the `Flow`. 

# Each node will set its attribute _done_signal to True to tell 

# the flow that this callback should be disabled. 

 

# Register the callbacks for the Work. 

#for work in self: 

# slot = self._sig_slots[work] 

# for signal in S_ALL: 

# done_signal = getattr(work, "_done_ " + signal, False) 

# if not done_sig: 

# cbk_name = "_on_" + str(signal) 

# cbk = getattr(work, cbk_name, None) 

# if cbk is None: continue 

# slot[work][signal].append(cbk) 

# print("connecting %s\nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK)) 

# dispatcher.connect(self.on_dep_ok, signal=signal, sender=dep.node, weak=False) 

 

# Register the callbacks for the Tasks. 

#self.show_receivers() 

 

def disconnect_signals(self): 

"""Disable the signals within the `Flow`.""" 

# Disconnect the signals inside each Work. 

for work in self: 

work.disconnect_signals() 

 

# Disable callbacks. 

for cbk in self._callbacks: 

cbk.disable() 

 

def show_receivers(self, sender=None, signal=None): 

sender = sender if sender is not None else dispatcher.Any 

signal = signal if signal is not None else dispatcher.Any 

print("*** live receivers ***") 

for rec in dispatcher.liveReceivers(dispatcher.getReceivers(sender, signal)): 

print("receiver -->", rec) 

print("*** end live receivers ***") 

 

def set_spectator_mode(self, mode=True): 

""" 

When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks 

A spectator can still operate on the flow but the new status of the flow won't be saved in 

the pickle file. Usually the flow is in spectator mode when we are already running it via 

the scheduler or other means and we should not interfere with its evolution. 

This is the reason why signals and callbacks must be disabled. 

Unfortunately, preventing client code from calling methods with side effects when 

the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the 

queue and the flow used by the scheduler won't see this change!). 

""" 

# Set the flags of all the nodes in the flow. 

mode = bool(mode) 

self.in_spectator_mode = mode 

for node in self.iflat_nodes(): 

node.in_spectator_mode = mode 

 

# connect/disconnect signals depending on mode. 

if not mode: 

self.connect_signals() 

else: 

self.disconnect_signals() 

 

#def get_results(self, **kwargs) 

 

def rapidfire(self, check_status=True, **kwargs): 

""" 

Use :class:`PyLauncher` to submit tasks in rapidfire mode. 

kwargs contains the options passed to the launcher. 

 

Return: 

number of tasks submitted. 

""" 

self.check_pid_file() 

self.set_spectator_mode(False) 

if check_status: self.check_status() 

from .launcher import PyLauncher 

return PyLauncher(self, **kwargs).rapidfire() 
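# Usage sketch (illustrative): submit everything that is ready to run, then save the new status.
#
#   nlaunched = flow.rapidfire()
#   flow.pickle_dump()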

 

def single_shot(self, check_status=True, **kwargs): 

""" 

Use :class:`PyLauncher` to submit one task. 

kwargs contains the options passed to the launcher. 

 

Return: 

number of tasks submitted. 

""" 

self.check_pid_file() 

self.set_spectator_mode(False) 

if check_status: self.check_status() 

from .launcher import PyLauncher 

return PyLauncher(self, **kwargs).single_shot() 

 

def make_scheduler(self, **kwargs): 

""" 

Build and return a :class:`PyFlowScheduler` to run the flow. 

 

Args: 

kwargs: If empty, the scheduler is initialized from the user configuration file. 

If `filepath` is in kwargs, the scheduler is initialized from that file. 

Otherwise, **kwargs is passed to the :class:`PyFlowScheduler` __init__ method. 

""" 

from .launcher import PyFlowScheduler 

if not kwargs: 

# User config if kwargs is empty 

sched = PyFlowScheduler.from_user_config() 

else: 

# Use from_file if filepath is present, else call __init__ 

filepath = kwargs.pop("filepath", None) 

if filepath is not None: 

assert not kwargs 

sched = PyFlowScheduler.from_file(filepath) 

else: 

sched = PyFlowScheduler(**kwargs) 

 

sched.add_flow(self) 

return sched 
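# Usage sketch (illustrative; assumes PyFlowScheduler exposes a start method, as in the launcher module):
#
#   sched = flow.make_scheduler()   # configured from the user configuration file
#   sched.start()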

 

def batch(self, timelimit=None): 

""" 

Run the flow in batch mode, return exit status of the job script. 

Requires a manager.yml file with a `batch_adapter` entry. 

 

Args: 

timelimit: Time limit (int with seconds or string with time given with the slurm convention: 

"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the 

`batch_adapter` entry of `manager.yml` is used. 

""" 

from .launcher import BatchLauncher 

# Create a batch dir from the flow.workdir. 

prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1]) 

prev_dir = os.path.join(os.path.sep, prev_dir) 

workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch") 

 

return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit) 

 

def make_light_tarfile(self, name=None): 

"""Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file.""" 

name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name 

return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"]) 

 

def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs): 

""" 

Create a tarball file. 

 

Args: 

name: Name of the tarball file. Set to os.path.basename(flow.workdir) + ".tar.gz" if name is None. 

max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize 

Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 Mb"`. 

No check is done if max_filesize is None. 

exclude_exts: List of file extensions to be excluded from the tar file. 

exclude_dirs: List of directory basenames to be excluded. 

verbose (int): Verbosity level. 

kwargs: keyword arguments passed to the :class:`TarFile` constructor. 

 

Returns: 

The name of the tarfile. 

""" 

def any2bytes(s): 

"""Convert string or number to memory in bytes.""" 

if is_string(s): 

return int(Memory.from_string(s).to("b")) 

else: 

return int(s) 

 

if max_filesize is not None: 

max_filesize = any2bytes(max_filesize) 

 

if exclude_exts: 

# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc" 

# Moreover this trick allows one to treat WFK.nc and WFK file on the same footing. 

exts = [] 

for e in list_strings(exclude_exts): 

exts.append(e) 

if e.endswith(".nc"): 

exts.append(e.replace(".nc", "")) 

else: 

exts.append(e + ".nc") 

exclude_exts = exts 

 

def filter(tarinfo): 

""" 

Function that takes a TarInfo object argument and returns the changed TarInfo object. 

If it instead returns None the TarInfo object will be excluded from the archive. 

""" 

# Skip links. 

if tarinfo.issym() or tarinfo.islnk(): 

if verbose: print("Excluding link: %s" % tarinfo.name) 

return None 

 

# Check size in bytes 

if max_filesize is not None and tarinfo.size > max_filesize: 

if verbose: print("Excluding %s due to max_filesize" % tarinfo.name) 

return None 

 

# Filter filenames. 

if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts): 

if verbose: print("Excluding %s due to extension" % tarinfo.name) 

return None 

 

# Exclude directories (use dir basenames). 

if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)): 

if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name) 

return None 

 

return tarinfo 

 

back = os.getcwd() 

os.chdir(os.path.join(self.workdir, "..")) 

 

import tarfile 

name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name 

with tarfile.open(name=name, mode='w:gz', **kwargs) as tar: 

tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, filter=filter) 

 

# Add the script used to generate the flow. 

if self.pyfile is not None and os.path.exists(self.pyfile): 

tar.add(self.pyfile) 

 

os.chdir(back) 

return name 
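# Usage sketch (illustrative values): archive the flow, skipping files larger than 10 Mb and all WFK files.
#
#   tarname = flow.make_tarfile(max_filesize="10 Mb", exclude_exts=["WFK"])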

 

#def abirobot(self, ext, check_status=True, nids=None): 

# """ 

# Builds and return the :class:`Robot` subclass from the file extension `ext`. 

# `nids` is an optional list of node identifiers used to filter the tasks in the flow. 

# """ 

# from abipy.abilab import abirobot 

# if check_status: self.check_status() 

# return abirobot(flow=self, ext=ext, nids=nids): 

 

def plot_networkx(self, mode="network", with_edge_labels=False, 

node_size="num_cores", node_label="name_class", layout_type="spring", 

**kwargs): 

""" 

Use networkx to draw the flow with the connections among the nodes and 

the status of the tasks. 

 

.. warning:: 

 

Requires networkx package. 

""" 

if not self.allocated: self.allocate() 

 

import networkx as nx 

 

# Build the graph 

g, edge_labels = nx.Graph(), {} 

tasks = list(self.iflat_tasks()) 

for task in tasks: 

g.add_node(task, name=task.name) 

for child in task.get_children(): 

g.add_edge(task, child) 

# TODO: Add getters! What about locked nodes! 

i = [dep.node for dep in child.deps].index(task) 

edge_labels[(task, child)] = " ".join(child.deps[i].exts) 

 

# Get positions for all nodes using layout_type. 

# e.g. pos = nx.spring_layout(g) 

pos = getattr(nx, layout_type + "_layout")(g) 

 

# Select function used to compute the size of the node 

make_node_size = dict( 

num_cores=lambda task: 300 * task.manager.num_cores 

)[node_size] 

 

# Select function used to build the label 

make_node_label = dict( 

name_class=lambda task: task.pos_str + "\n" + task.__class__.__name__, 

)[node_label] 

 

labels = {task: make_node_label(task) for task in tasks} 

 

import matplotlib.pyplot as plt 

 

# Select plot type. 

if mode == "network": 

nx.draw_networkx(g, pos, labels=labels, 

#node_color='#A0CBE2', 

# FIXME: This does not work as expected. Likely bug in networkx! 

node_color=[task.color_rgb for task in tasks], 

node_size=[make_node_size(task) for task in tasks], 

width=2, style="dotted", with_labels=True) 

 

# Draw edge labels 

if with_edge_labels: 

nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels) 

 

elif mode == "status": 

# Group tasks by status. 

for status in self.ALL_STATUS: 

tasks = list(self.iflat_tasks(status=status)) 

 

# Draw nodes (color is given by status) 

node_color = status.color_opts["color"] 

if node_color is None: node_color = "black" 

#print("num nodes %s with node_color %s" % (len(tasks), node_color)) 

 

nx.draw_networkx_nodes(g, pos, 

nodelist=tasks, 

node_color=node_color, 

node_size=[make_node_size(task) for task in tasks], 

alpha=0.5, 

#label=str(status), 

) 

 

# Draw edges. 

nx.draw_networkx_edges(g, pos, width=2.0, alpha=0.5, arrows=True) # edge_color='r') 

 

# Draw labels 

nx.draw_networkx_labels(g, pos, labels, font_size=12) 

 

# Draw edge labels 

if with_edge_labels: 

nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels) 

#label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal', 

# alpha=1.0, bbox=None, ax=None, rotate=True, **kwds) 

 

else: 

raise ValueError("Unknown value for mode: %s" % mode) 

 

plt.axis('off') 

plt.show() 
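# Usage sketch (illustrative): color the nodes according to the task status and show the
# file extensions exchanged along the edges.
#
#   flow.plot_networkx(mode="status", with_edge_labels=True)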

 

 

class G0W0WithQptdmFlow(Flow): 

def __init__(self, workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None): 

""" 

Build a :class:`Flow` for one-shot G0W0 calculations. 

The computation of the q-points for the screening is parallelized with qptdm 

i.e. we run independent calculations for each q-point and then we merge the final results. 

 

Args: 

workdir: Working directory. 

scf_input: Input for the GS SCF run. 

nscf_input: Input for the NSCF run (band structure run). 

scr_input: Input for the SCR run. 

sigma_inputs: Input(s) for the SIGMA run(s). 

manager: :class:`TaskManager` object used to submit the jobs 

Initialized from manager.yml if manager is None. 

""" 

super(G0W0WithQptdmFlow, self).__init__(workdir, manager=manager) 

 

# Register the first work (GS + NSCF calculation) 

bands_work = self.register_work(BandStructureWork(scf_input, nscf_input)) 

 

# Register the callback that will build the work for the SCR run parallelized with qptdm. 

scr_work = self.register_work_from_cbk(cbk_name="cbk_qptdm_workflow", cbk_data={"input": scr_input}, 

deps={bands_work.nscf_task: "WFK"}, work_class=QptdmWork) 

 

# The last work contains a list of SIGMA tasks 

# that will use the data produced in the previous two works. 

if not isinstance(sigma_inputs, (list, tuple)): 

sigma_inputs = [sigma_inputs] 

 

sigma_work = Work() 

for sigma_input in sigma_inputs: 

sigma_work.register_sigma_task(sigma_input, deps={bands_work.nscf_task: "WFK", scr_work: "SCR"}) 

self.register_work(sigma_work) 

 

self.allocate() 

 

def cbk_qptdm_workflow(self, cbk): 

""" 

This callback is executed by the flow when bands_work.nscf_task reaches S_OK. 

 

It computes the list of q-points for the W(q,G,G'), creates nqpt tasks 

in the second work (QptdmWork), and connects the signals. 

""" 

scr_input = cbk.data["input"] 

# Use the WFK file produced by the second 

# Task in the first Work (NSCF step). 

nscf_task = self[0][1] 

wfk_file = nscf_task.outdir.has_abiext("WFK") 

 

work = self[1] 

work.set_manager(self.manager) 

work.create_tasks(wfk_file, scr_input) 

work.add_deps(cbk.deps) 

 

work.set_flow(self) 

# Each task has a reference to its work. 

for task in work: 

task.set_work(work) 

# Add the garbage collector. 

if self.gc is not None: task.set_gc(self.gc) 

 

work.connect_signals() 

work.build() 

 

return work 

 

 

class FlowCallbackError(Exception): 

"""Exceptions raised by FlowCallback.""" 

 

 

class FlowCallback(object): 

""" 

This object implements the callbacks executed by the :class:`Flow` when 

particular conditions are fulfilled. See on_dep_ok method of :class:`Flow`. 

 

.. note:: 

 

I decided to implement callbacks via this object instead of a standard 

approach based on bound methods because: 

 

1) pickle (v<=3) does not support the pickling/unpickling of bound methods 

 

2) There's some extra logic and extra data needed for the proper functioning 

of a callback at the flow level and this object provides an easy-to-use interface. 

""" 

Error = FlowCallbackError 

 

def __init__(self, func_name, flow, deps, cbk_data): 

""" 

Args: 

func_name: String with the name of the callback to execute. 

func_name must be a bound method of flow with signature: 

 

func_name(self, cbk) 

 

where self is the Flow instance and cbk is the callback 

flow: Reference to the :class:`Flow` 

deps: List of dependencies associated to the callback 

The callback is executed when all dependencies reach S_OK. 

cbk_data: Dictionary with additional data that will be passed to the callback via self. 

""" 

self.func_name = func_name 

self.flow = flow 

self.deps = deps 

self.data = cbk_data or {} 

self._disabled = False 

 

def __str__(self): 

return "%s: %s bound to %s" % (self.__class__.__name__, self.func_name, self.flow) 

 

def __call__(self): 

"""Execute the callback.""" 

if self.can_execute(): 

# Get the bound method of the flow from func_name. 

# We use this trick because pickle (format <=3) does not support bound methods. 

try: 

func = getattr(self.flow, self.func_name) 

except AttributeError as exc: 

raise self.Error(str(exc)) 

 

return func(self) 

 

else: 

raise self.Error("You tried to __call_ a callback that cannot be executed!") 

 

def can_execute(self): 

"""True if we can execute the callback.""" 

return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps) 

 

def disable(self): 

""" 

Disable the callback. This usually happens after the callback has been executed. 

""" 

self._disabled = True 

 

def enable(self): 

"""Enable the callback""" 

self._disabled = False 

 

def handle_sender(self, sender): 

""" 

True if the callback is associated to the sender 

i.e. if the node that sent the signal appears in the 

dependencies of the callback. 

""" 

return sender in [d.node for d in self.deps] 

 

 

# Factory functions. 

def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True): 

""" 

Build a :class:`Flow` for band structure calculations. 

 

Args: 

workdir: Working directory. 

scf_input: Input for the GS SCF run. 

nscf_input: Input for the NSCF run (band structure run). 

dos_inputs: Input(s) for the NSCF run (dos run). 

manager: :class:`TaskManager` object used to submit the jobs 

Initialized from manager.yml if manager is None. 

flow_class: Flow subclass 

allocate: True if the flow should be allocated before returning. 

 

Returns: 

:class:`Flow` object 

""" 

flow = flow_class(workdir, manager=manager) 

work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs) 

flow.register_work(work) 

 

# Handy aliases 

flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks 

 

if allocate: flow.allocate() 

return flow 
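# Usage sketch (illustrative; scf_input, nscf_input and dos_input are hypothetical AbinitInput objects):
#
#   flow = bandstructure_flow("flow_bands", scf_input, nscf_input, dos_inputs=[dos_input])
#   flow.build_and_pickle_dump()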

 

 

def g0w0_flow(workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None, flow_class=Flow, allocate=True): 

""" 

Build a :class:`Flow` for one-shot $G_0W_0$ calculations. 

 

Args: 

workdir: Working directory. 

scf_input: Input for the GS SCF run. 

nscf_input: Input for the NSCF run (band structure run). 

scr_input: Input for the SCR run. 

sigma_inputs: List of inputs for the SIGMA run. 

flow_class: Flow class 

manager: :class:`TaskManager` object used to submit the jobs. 

Initialized from manager.yml if manager is None. 

allocate: True if the flow should be allocated before returning. 

 

Returns: 

:class:`Flow` object 

""" 

flow = flow_class(workdir, manager=manager) 

work = G0W0Work(scf_input, nscf_input, scr_input, sigma_inputs) 

flow.register_work(work) 

if allocate: flow.allocate() 

return flow 

 

 

class PhononFlow(Flow): 

""" 

1) One workflow for the GS run. 

 

2) nqpt works for phonon calculations. Each work contains 

nirred tasks where nirred is the number of irreducible phonon perturbations 

for that particular q-point. 

""" 

@classmethod 

def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True): 

""" 

Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run. 

 

Args: 

workdir: Working directory of the flow. 

scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run. 

ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for 

electrons, e.g. if ngkpt = (8, 8, 8), ph_ngqpt = (4, 4, 4) is a valid choice 

whereas ph_ngqpt = (3, 3, 3) is not! 

with_becs: True if Born effective charges are wanted. 

manager: :class:`TaskManager` object. Read from `manager.yml` if None. 

allocate: True if the flow should be allocated before returning. 

 

Return: 

:class:`PhononFlow` object. 

""" 

flow = cls(workdir, manager=manager) 

 

# Register the SCF task 

flow.register_scf_task(scf_input) 

scf_task = flow[0][0] 

 

# Make sure k-mesh and q-mesh are compatible. 

scf_ngkpt, ph_ngqpt = np.array(scf_input["ngkpt"]), np.array(ph_ngqpt) 

 

if any(scf_ngkpt % ph_ngqpt != 0): 

raise ValueError("ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s" % (ph_ngqpt, scf_ngkpt)) 

 

# Get the q-points in the IBZ from Abinit 

qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0,0,0), kptopt=1).points 

 

# Create a PhononWork for each q-point. Add DDK and E-field if q == Gamma and with_becs. 

for qpt in qpoints: 

if np.allclose(qpt, 0) and with_becs: 

ph_work = BecWork.from_scf_task(scf_task) 

else: 

ph_work = PhononWork.from_scf_task(scf_task, qpt=qpt) 

 

flow.register_work(ph_work) 

 

if allocate: flow.allocate() 

 

return flow 
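# Usage sketch (illustrative; assumes scf_input uses ngkpt = (4, 4, 4) so that (2, 2, 2) is a valid sub-mesh):
#
#   flow = PhononFlow.from_scf_input("flow_phonons", scf_input, ph_ngqpt=(2, 2, 2), with_becs=True)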

 

def open_final_ddb(self): 

""" 

Open the DDB file located in the output directory of the flow. 

 

Return: 

:class:`DdbFile` object, None if file could not be found or file is not readable. 

""" 

ddb_path = self.outdir.has_abiext("DDB") 

if not ddb_path: 

if self.status == self.S_OK: 

logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir)) 

return None 

 

from abipy.dfpt.ddb import DdbFile 

try: 

return DdbFile(ddb_path) 

except Exception as exc: 

logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc))) 

return None 

 

def finalize(self): 

"""This method is called when the flow is completed.""" 

# Merge all the out_DDB files found in work.outdir. 

ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self])) 

 

# Final DDB file will be produced in the outdir of the flow. 

out_ddb = self.outdir.path_in("out_DDB") 

desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime()) 

 

mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0) 

mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc) 

 

print("Final DDB file available at %s" % out_ddb) 

 

# Call the method of the super class. 

retcode = super(PhononFlow, self).finalize() 

print("retcode", retcode) 

#if retcode != 0: return retcode 

return retcode 

 

 

def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False, 

manager=None, flow_class=PhononFlow, allocate=True): 

""" 

Build a :class:`PhononFlow` for phonon calculations. 

 

Args: 

workdir: Working directory. 

scf_input: Input for the GS SCF run. 

ph_inputs: List of Inputs for the phonon runs. 

with_nscf: add an nscf task in front of all phonon tasks to make sure the q-point is covered 

with_ddk: add the ddk step 

with_dde: add the dde step; if dde is set, ddk is switched on automatically 

manager: :class:`TaskManager` used to submit the jobs 

Initialized from manager.yml if manager is None. 

flow_class: Flow class 

 

Returns: 

:class:`Flow` object 

""" 

logger.critical("phonon_flow is deprecated and could give wrong results") 

if with_dde: 

with_ddk = True 

 

natom = len(scf_input.structure) 

 

# Create the container that will manage the different works. 

flow = flow_class(workdir, manager=manager) 

 

# Register the first work (GS calculation) 

# register_task creates a work for the task, registers it to the flow and returns the work 

# the 0th element of the work is the task 

scf_task = flow.register_task(scf_input, task_class=ScfTask)[0] 

 

# Build a temporary work with a shell manager just to run 

# ABINIT to get the list of irreducible perturbations for this q-point. 

shell_manager = flow.manager.to_shell_manager(mpi_procs=1) 

 

if with_ddk: 

logger.info('add ddk') 

# TODO 

# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!) 

ddk_input = ph_inputs[0].deepcopy() 

ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1]) 

ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0] 

 

if with_dde: 

logger.info('add dde') 

dde_input = ph_inputs[0].deepcopy() 

dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2) 

dde_input_idir = dde_input.deepcopy() 

dde_input_idir.set_vars(rfdir=[1, 1, 1]) 

dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0] 

 

if not isinstance(ph_inputs, (list, tuple)): 

ph_inputs = [ph_inputs] 

 

for i, ph_input in enumerate(ph_inputs): 

fake_input = ph_input.deepcopy() 

 

# Run abinit on the front-end to get the list of irreducible perturbations. 

tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__") 

#import tempfile 

#tmp_dir = tempfile.mkdtemp() 

w = PhononWork(workdir=tmp_dir, manager=shell_manager) 

fake_task = w.register(fake_input) 

 

# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point. 

abivars = dict( 

paral_rf=-1, 

rfatpol=[1, natom], # Set of atoms to displace. 

rfdir=[1, 1, 1], # Along this set of reduced coordinate axis. 

) 

 

fake_task._set_inpvars(abivars) 

w.allocate() 

w.start(wait=True) 

 

# Parse the file to get the perturbations. 

try: 

irred_perts = yaml_read_irred_perts(fake_task.log_file.path) 

except Exception: 

print("Error in %s" % fake_task.log_file.path) 

raise 

 

logger.info(irred_perts) 

 

w.rmtree() 

 

# Now we can build the final list of works: 

# One work per q-point, each work computes all 

# the irreducible perturbations for a single q-point. 

 

work_qpt = PhononWork() 

 

if with_nscf: 

# MG: Warning, this code assumes 0 is Gamma! 

nscf_input = copy.deepcopy(scf_input) 

nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1) 

nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"}) 

deps = {nscf_task: "WFQ", scf_task: "WFK"} 

else: 

deps = {scf_task: "WFK"} 

 

if with_ddk: 

deps[ddk_task] = 'DDK' 

 

logger.info(irred_perts[0]['qpt']) 

 

for irred_pert in irred_perts: 

#print(irred_pert) 

new_input = ph_input.deepcopy() 

 

#rfatpol 1 1 # Only the first atom is displaced 

#rfdir 1 0 0 # Along the first reduced coordinate axis 

qpt = irred_pert["qpt"] 

idir = irred_pert["idir"] 

ipert = irred_pert["ipert"] 

 

# TODO this will work for phonons, but not for the other types of perturbations. 

rfdir = 3 * [0] 

rfdir[idir -1] = 1 

rfatpol = [ipert, ipert] 

 

new_input.set_vars( 

#rfpert=1, 

qpt=qpt, 

rfdir=rfdir, 

rfatpol=rfatpol, 

) 

 

if with_ddk: 

new_input.set_vars(rfelfd=3) 

 

work_qpt.register_phonon_task(new_input, deps=deps) 

 

flow.register_work(work_qpt) 

 

if allocate: flow.allocate() 

 

return flow 

 

 

def phonon_conv_flow(workdir, scf_input, qpoints, params, manager=None, allocate=True): 

""" 

Create a :class:`Flow` to perform convergence studies for phonon calculations. 

 

Args: 

workdir: Working directory of the flow. 

scf_input: :class:`AbinitInput` object defining a GS-SCF calculation. 

qpoints: List of q-points given in reduced coordinates (reshaped internally to an array of shape (nqpt, 3)). 

params: 

To perform a convergence study wrt ecut: params=["ecut", [2, 4, 6]] 

manager: :class:`TaskManager` object responsible for the submission of the jobs. 

If manager is None, the object is initialized from the yaml file 

located either in the working directory or in the user configuration dir. 

allocate: True if the flow should be allocated before returning. 

 

Return: 

:class:`Flow` object. 

""" 

qpoints = np.reshape(qpoints, (-1, 3)) 

 

flow = Flow(workdir=workdir, manager=manager) 

 

for qpt in qpoints: 

for gs_inp in scf_input.product(*params): 

# Register the SCF task 

work = flow.register_scf_task(gs_inp) 

 

# Add the PhononWork connected to this scf_task. 

flow.register_work(PhononWork.from_scf_task(work[0], qpt=qpt)) 

 

if allocate: flow.allocate() 

return flow
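# Usage sketch (illustrative values): converge phonons at Gamma with respect to ecut.
#
#   flow = phonon_conv_flow("flow_phconv", scf_input, qpoints=[0, 0, 0],
#                           params=["ecut", [4, 8, 12]])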