
/usr/lib/python2.7/dist-packages/dolfin_utils/pjobs/pjobs.py is in python-dolfin 2016.2.0-2.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) 2009 Martin Sandve Alnæs
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.

# Modified by Johan Hake 2011
# Modified by Johannes Ring 2013

from __future__ import print_function

from six import string_types

from . import torque
from . import sge
from . import slurm
import os
import subprocess
import re

def check_mem_argument(mem, backend):
    "Return True if mem is None or a valid memory specification for backend."
    if backend == "slurm":
        allowed_mem_suffixes = ["k", "m", "g", "K", "M", "G"]
    else:
        allowed_mem_suffixes = ["kb", "mb", "gb"]
    if mem is not None:
        assert isinstance(mem, string_types)
        match = re.match(r"(\d+)(\w+)", mem)
        if not match:
            return False
        if match.groups()[1] not in allowed_mem_suffixes:
            return False
        try:
            memory = int(match.groups()[0])
            if memory < 0:
                return False
        except ValueError:
            return False
    return True
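
# A minimal sketch of the accepted formats (illustrative values only):
#
#   check_mem_argument("100mb", "torque")  # True
#   check_mem_argument("4G", "slurm")      # True
#   check_mem_argument("4gb", "slurm")     # False ("gb" is not a slurm suffix)
#   check_mem_argument("lots", "torque")   # False (no leading integer)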

def sjoin(*items):
    "Join str() of all arguments."
    return "".join(map(str,items))


def submit(jobs, nodes=1, ppn=1, walltime=3, mem=None, vmem=None, name="myjob",
           workdir=".", email=None, paths=(), setup="",
           keep_environment=True, serial=None, nice=None, dryrun=False,
           queue=None, parallel_environment=None, backend="torque"):
    """Submit a sequence of commands to the queue using qsub/sbatch.
    
    Example use:
        from pjobs import submit, sjoin
        jobs = []
        for n in (10, 20, 30):
            for h in (0.1, 0.01, 0.005):
                dt = 0.1*h
                jobs.append("pmpirun.openmpi mysimulator -n%d -h%f -dt%f" % (n, h, dt))
        submit(jobs, nodes=4, ppn=8, walltime=24*7)
    
    Arguments:
    @param jobs:
        A list of commands (strings) to pass to the queue for separate execution.
    @param nodes:
        Number of compute nodes to use.
        Default is 1 node.
    @param ppn:
        Number of processors per node to use.
        Default is 1 processor.
    @param walltime:
        Maximum amount of wall-clock time the job can use, in hours.
        Default is 3.
    @param mem:
        Optional maximum amount of physical memory the process will need.
        The syntax is mem='100mb' or mem='4gb', with the allowed suffixes
        being one of 'kb', 'mb' or 'gb' for the torque and sge backends,
        and one of 'k', 'm', 'g', 'K', 'M', 'G' for the slurm backend.
        Only integer values are allowed.
    @param vmem:
        Optional maximum amount of virtual memory the process will need.
        For the syntax, see mem.
        (Not available for the slurm backend.)
    @param name:
        Basename of the job, just for the queue system.
        If a single string this will be combined with a
        counter to separate multiple jobs. A list or
        tuple of strings can be provided instead to
        define separate names for each job.
        Default is 'myjob'.
    @param email:
        Your email address, if you want to be notified by email when
        the jobs start and stop.
    @param workdir:
        The directory the jobs will be run in.
        Default is the directory you submit the job from.
    @param paths:
        A list of directories to add to the $PATH.
    @param setup:
        Optional additional job script contents for setting 
        up e.g. environment variables before each job.
    @param keep_environment:
        True if you want to copy all current environment
        variables to the job environment (PBS -V option).
        Default True.
    @param serial:
        True if you want to run the jobs as a serial sequence of processes
        instead of passing them to the qsub queue.
        Default is False, unless the environment variable PJOBS_SERIAL=1
        is set, in which case the default is True.
    @param nice:
        Optional nice value (int) used for all jobs when running in serial.
        If provided, "nice -n %d" % nice is added before each job command.
    @param dryrun:
        True if you want to test this script but not send jobs to the queue.
    @param queue:
        Set a particular queue to be used when submitting.
        (Only available for the sge backend.)
    @param parallel_environment:
        Set the parallel environment (PE) of the submitting backend.
        (Only available for the sge backend.)
    @param backend:
        Set the name of the actual submitter.
        Available backends: 'torque' (default), 'sge' and 'slurm'.
    """
    if isinstance(jobs, string_types):
        jobs = [jobs]
    assert all(isinstance(job, string_types) for job in jobs)
    assert isinstance(nodes, int)
    assert isinstance(ppn, int)
    assert isinstance(walltime, (int, float))
    if not check_mem_argument(mem, backend):
        raise TypeError("Wrong format of mem argument, see docstring")
    if not check_mem_argument(vmem, backend):
        raise TypeError("Wrong format of vmem argument, see docstring")
    assert isinstance(name, string_types + (list, tuple))
    assert isinstance(workdir, string_types)
    assert email is None or isinstance(email, string_types)
    if serial is None:
        # Environment variable PJOBS_SERIAL=1 toggles serial behaviour
        serial = os.environ.get("PJOBS_SERIAL") == "1"
    assert isinstance(serial, bool)
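    # (e.g. invoking the calling script as "PJOBS_SERIAL=1 python script.py"
    # runs the jobs locally in sequence; "script.py" is a hypothetical name)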

    # Check queue and PE
    queue = queue or ""
    parallel_environment = parallel_environment or ""
    assert isinstance(queue, string_types)
    assert isinstance(parallel_environment, string_types)

    # Check backend
    assert isinstance(backend, string_types)
    assert (backend in ["torque", "sge", "slurm"])
    
    # Shared setup
    paths = "PATH=$PATH:" + ":".join(paths) if paths else ""
    
    # Modify options to make sense in serial
    if serial:
        if paths:
            print("TODO: Not using paths for serial job.")
        if setup:
            print("TODO: Not using setup for serial job.")
        if workdir == ".":
            workdir = os.getcwd()
        workdir = os.path.abspath(workdir)
        if nice is not None:
            jobs = ["nice -n %d %s" % (nice, job) for job in jobs]
    else:
        workdir = os.path.abspath(workdir)
    
    # Define range of names if only a single name is provided
    if isinstance(name, string_types):
        names = ["%s_%d" % (name, k) for k in range(len(jobs))]
    else:
        names = name
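    # (e.g. name="myjob" with three jobs gives ["myjob_0", "myjob_1", "myjob_2"])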

    # Enter workdir
    origdir = os.getcwd()
    try:
        if serial:
            os.chdir(workdir)
        # Queue sequence of jobs
        for job, jobname in zip(jobs, names):
            
            if serial:
                # Execute job in separate process
                cmd = job
                if dryrun:
                    print("NOT calling job command:", cmd)
                else:
                    print("Calling job command:", cmd)
                    # Pass process output to file f
                    #f = open("%s_stdout" % jobname, "w")
                    subprocess.call(cmd.split())
                    #f.close()
            else:
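                # Look up the backend module (torque, sge or slurm, imported
                # above) by name and let it render the job script text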
                job_script = globals()[backend].job_script(job, jobname, nodes,
                                                           ppn, walltime, mem,
                                                           vmem, workdir, email,
                                                           paths, setup,
                                                           keep_environment,
                                                           queue, parallel_environment)
                
                # Write job script to file
                scriptfilename = "run_%s.sh" % jobname
                with open(scriptfilename, "w") as f:
                    f.write(job_script)
                print("Wrote file", scriptfilename)
                
                # Queue the generated job script
                if backend == "slurm":
                    cmd = ["sbatch", scriptfilename]
                else:
                    cmd = ["qsub", scriptfilename]
                if dryrun:
                    print("NOT calling queue command:", " ".join(cmd))
                else:
                    print("Calling queue command:", " ".join(cmd))
                    subprocess.call(cmd)
                    os.unlink(scriptfilename)
    finally:
        os.chdir(origdir)
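
# Example use (a minimal sketch; "mysimulator" is a hypothetical executable,
# and the import path may differ depending on how the package is installed):
#
#   from dolfin_utils.pjobs.pjobs import submit, sjoin
#   jobs = [sjoin("mysimulator -n", n) for n in (10, 20, 30)]
#   submit(jobs, nodes=2, ppn=4, walltime=12, name="scaling", dryrun=True)
#
# With dryrun=True the generated job scripts are written to
# run_scaling_<k>.sh but nothing is submitted to the queue.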