NP_TMcode · Commit 30e5a614

Authored 1 year ago by Giovanni La Mura

Implement output comparison script

Parent: 3dbaa98b
Showing 1 changed file: src/scripts/pycompare (new file, mode 0 → 100755), 259 additions and 0 deletions.

#!/bin/python

import re
from math import log10
from sys import argv

number_reg = re.compile(r'-?[0-9]\.[0-9]+E[-+][0-9]{2,2}')

def main():
    config = parse_arguments()
    errors, warnings, noisy = (0, 0, 0)
    if config['help_mode'] or len(argv) == 1:
        config['help_mode'] = True
        print_help()
    else:
        compare_log = compare_files(config)
        errors = compare_log['errors']
        warnings = compare_log['warnings']
        noisy = compare_log['noisy']
        print("ERROR COUNT: %d"%errors)
        print("WARNING COUNT: %d"%warnings)
        print("NOISE COUNT: %d"%noisy)
        if (errors > 0):
            print("FAILURE: {0:s} is not consistent with {1:s}".format(
                config['c_file_name'], config['fortran_file_name']
            ))
        else:
            if (not config['help_mode']):
                print("SUCCESS: {0:s} is consistent with {1:s}".format(
                    config['c_file_name'], config['fortran_file_name']
                ))
    return errors

def compare_files(config):
    mismatch_count = {
        'errors': 0,
        'warnings': 0,
        'noisy': 0,
    }
    fortran_file = open(config['fortran_file_name'], 'r')
    c_file = open(config['c_file_name'], 'r')
    l_file = None
    f_lines = fortran_file.readlines()
    c_lines = c_file.readlines()
    fortran_file.close()
    c_file.close()
    if (len(f_lines) == len(c_lines)):
        line_count = len(f_lines)
        num_len = 1
        if (line_count > 0):
            num_len = int(log10(line_count)) + 1
        if (config['log_html']):
            l_file = open(config['html_output'], 'w')
l_file.write("
<!DOCTYPE html>
\n")
l_file.write("
<html
xmnls=
\"http://www.w3.org/1999/xhtml\"
>
\n")
l_file.write("
<header>
\n")
l_file.write(
"
<h1>
Comparison between {0:s} and {1:s}
</h1>
\n".format(
config['fortran_file_name'], config['c_file_name']
)
)
l_file.write("
</header>
\n")
l_file.write("
<body>
\n")
l_file.write("
<div>
Numeric noise is marked
<span
style=
\"font-weight:
bold
;
color:
rgb
(0,255,0)\"
>
"
+ "GREEN
</span>
, warnings are marked
<span
style=
\"font-weight:
bold
;
color:
rgb
(0,0,255)\"
>
"
+ "BLUE
</span>
and errors are marked
<span
style=
\"font-weight:
bold
;
color:
rgb
(255,0,0)\"
>
"
+ "RED
</span>
.
</div>
\n")
        for li in range(line_count):
            line_result = compare_lines(f_lines[li], c_lines[li], config, li + 1, num_len, l_file)
            mismatch_count['errors'] += line_result[0]
            mismatch_count['warnings'] += line_result[1]
            mismatch_count['noisy'] += line_result[2]
            if (mismatch_count['errors'] > 0 and not config['check_all']):
                print("INFO: mismatch found at line %d"%(li + 1))
                break
        if l_file is not None:
            l_file.write("</body>\n")
            l_file.write("</html>\n")
            l_file.close()
    return mismatch_count

def compare_lines(f_line, c_line, config, line_num=0, num_len=1, log_file=None):
    errors = 0
    warnings = 0
    noisy = 0
    f_line = f_line.replace("D-","E-").replace("D+","E+")
    if (f_line == c_line):
        if log_file is not None:
            num_format = "<div><pre><code>{0:0%dd}"%num_len
            log_line = (num_format + ": {1:s}</code></pre></div>\n").format(line_num, c_line[:-1])
            log_file.write(log_line)
    else:
        iter_f_values = number_reg.finditer(f_line)
        iter_c_values = number_reg.finditer(c_line)
        f_starts, f_ends, f_groups = [], [], []
        c_starts, c_ends, c_groups = [], [], []
        for fi in iter_f_values:
            f_starts.append(fi.start())
            f_ends.append(fi.end())
            f_groups.append(fi.group())
        for ci in iter_c_values:
            c_starts.append(ci.start())
            c_ends.append(ci.end())
            c_groups.append(ci.group())
        severities = mismatch_severities(f_groups, c_groups, config)
        if log_file is not None:
            num_format = "<div><pre><code>{0:0%dd}"%num_len
            log_line = (num_format + ": ").format(line_num)
            log_line = log_line + c_line[0:c_starts[0]]
        for si in range(len(severities) - 1):
            if (severities[si] == 1): noisy += 1
            elif (severities[si] == 2): warnings += 1
            elif (severities[si] == 3): errors += 1
            if log_file is not None:
                if (severities[si] == 0):
                    log_line = log_line + c_groups[si] + c_line[c_ends[si]:c_starts[si + 1]]
                elif (severities[si] == 1):
                    log_line = (
                        log_line + "</code><span style=\"font-weight: bold; color: rgb(0,255,0)\"><code>"
                        + c_groups[si] + "</code></span><code>" + c_line[c_ends[si]:c_starts[si + 1]]
                    )
                elif (severities[si] == 2):
                    log_line = (
                        log_line + "</code><span style=\"font-weight: bold; color: rgb(0,0,255)\"><code>"
                        + c_groups[si] + "</code></span><code>" + c_line[c_ends[si]:c_starts[si + 1]]
                    )
                elif (severities[si] == 3):
                    log_line = (
                        log_line + "</code><span style=\"font-weight: bold; color: rgb(255,0,0)\"><code>"
                        + c_groups[si] + "</code></span><code>" + c_line[c_ends[si]:c_starts[si + 1]]
                    )
        if (severities[-1] == 1): noisy += 1
        elif (severities[-1] == 2): warnings += 1
        elif (severities[-1] == 3): errors += 1
        if log_file is not None:
            if (severities[-1] == 0):
                log_line = (
                    log_line + c_groups[-1] + c_line[c_ends[-1]:len(c_line) - 2]
                )
            elif (severities[-1] == 1):
                log_line = (
                    log_line + "</code><span style=\"font-weight: bold; color: rgb(0,255,0)\"><code>"
                    + c_groups[-1] + "</code></span><code>" + c_line[c_ends[-1]:len(c_line) - 2]
                )
            elif (severities[-1] == 2):
                log_line = (
                    log_line + "</code><span style=\"font-weight: bold; color: rgb(0,0,255)\"><code>"
                    + c_groups[-1] + "</code></span><code>" + c_line[c_ends[-1]:len(c_line) - 2]
                )
            elif (severities[-1] == 3):
                log_line = (
                    log_line + "</code><span style=\"font-weight: bold; color: rgb(255,0,0)\"><code>"
                    + c_groups[-1] + "</code></span><code>" + c_line[c_ends[-1]:len(c_line) - 2]
                )
            log_file.write(log_line + "</code></pre></div>\n")
    return (errors, warnings, noisy)

def mismatch_severities(str_f_values, str_c_values, config):
    """Determine the severity of a numerical mismatch.

    The severity scale is currently designed with the following integer codes:

    0 - the values are equal
    1 - the values are subject to suspect numerical noise (green fonts)
    2 - the values are different but below error threshold (blue fonts)
    3 - the values differ more than error threshold (red fonts)

    -----------
    Parameters:

    str_f_values: `array(string)`
        The strings representing the numeric values read from the FORTRAN output
        file.
    str_c_values: `array(string)`
        The strings representing the numeric values read from the C++ output file.
    config: `dict`
        A dictionary containing the configuration options from which to read the
        warning and the error threshold.
    """
    result = [0 for ri in range(len(str_f_values))]
    for i in range(len(str_f_values)):
        if (str_f_values[i] != str_c_values[i]):
            f_values = [float(str_f_values[j]) for j in range(len(str_f_values))]
            c_values = [float(str_c_values[j]) for j in range(len(str_c_values))]
            f_log_values = [0.0 for j in range(len(f_values))]
            c_log_values = [0.0 for j in range(len(c_values))]
            max_f_log = -1.0e12
            max_c_log = -1.0e12
            for j in range(len(f_values)):
                if f_values[j] < 0.0: f_values[j] *= -1.0
                if c_values[j] < 0.0: c_values[j] *= -1.0
                f_log_values[j] = log10(f_values[j]) if f_values[j] > 0.0 else -999
                c_log_values[j] = log10(c_values[j]) if c_values[j] > 0.0 else -999
                if (f_log_values[j] > max_f_log): max_f_log = f_log_values[j]
                if (c_log_values[j] > max_c_log): max_c_log = c_log_values[j]
            if (c_log_values[i] < max_c_log - 5.0 and f_log_values[i] < max_f_log - 5.0):
                result[i] = 1
            else:
                difference = c_values[i] - f_values[i]
                fractional = 1.0
                if (f_values[i] != 0.0):
                    fractional = difference / f_values[i]
                if (fractional < 0.0):
                    fractional *= -1.0
                if (fractional < config['warning_threshold']):
                    result[i] = 2
                else:
                    result[i] = 3
    return result

def parse_arguments():
    config = {
        'fortran_file_name': '',
        'c_file_name': '',
        'log_html': False,
        'html_output': 'pycompare.html',
        'warning_threshold': 0.005,
        'help_mode': False,
        'check_all': True,
    }
    for arg in argv[1:]:
        split_arg = arg.split("=")
        if (arg.startswith("--ffile")):
            config['fortran_file_name'] = split_arg[1]
        elif (arg.startswith("--cfile")):
            config['c_file_name'] = split_arg[1]
        elif (arg.startswith("--html")):
            config['log_html'] = True
        elif (arg.startswith("--logname")):
            config['html_output'] = split_arg[1]
        elif (arg.startswith("--warn")):
            config['warning_threshold'] = float(split_arg[1])
        elif (arg.startswith("--help")):
            config['help_mode'] = True
        elif (arg.startswith("--quick")):
            config['check_all'] = False
        else:
            raise Exception("Unrecognized argument \'{0:s}\'".format(arg))
    return config

def print_help():
    print("")
    print("***               PYCOMPARE               ***")
    print("")
    print("Compare the output of C++ and FORTRAN codes.")
    print("")
    print("Usage: \"./pycompare OPTIONS\"")
    print("")
    print("Valid options are:")
    print("--ffile=FORTRAN_OUTPUT  File containing the output of the FORTRAN code (mandatory).")
    print("--cfile=C++_OUTPUT      File containing the output of the C++ code (mandatory).")
    print("--help                  Print this help and exit.")
    print("--html                  Enable logging to HTML file.")
    print("--logname               Name of the HTML log file (default is \"pycompare.html\").")
    print("--quick                 Stop on first mismatch (default is to perform a full check).")
    print("--warn                  Set a fractional threshold for numeric warning (default=0.005).")
    print("")

### PROGRAM EXECUTION ###
res = main()
if (res > 0): exit(1)
exit(0)
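
For reference, the severity rule implemented by mismatch_severities can be restated as a minimal standalone sketch, assuming the default warning threshold of 0.005 set in parse_arguments. The helper name classify and the sample values below are hypothetical and only illustrate the three non-zero severity codes; they are not part of the committed script.

from math import log10

def classify(f_str, c_str, max_f_log, max_c_log, warning_threshold=0.005):
    # Identical strings are never a mismatch (severity 0).
    if f_str == c_str:
        return 0
    f_val, c_val = abs(float(f_str)), abs(float(c_str))
    f_log = log10(f_val) if f_val > 0.0 else -999
    c_log = log10(c_val) if c_val > 0.0 else -999
    # Values more than 5 orders of magnitude below the largest value on the
    # line are treated as numerical noise (severity 1, green).
    if c_log < max_c_log - 5.0 and f_log < max_f_log - 5.0:
        return 1
    # Otherwise the fractional difference decides between a warning
    # (severity 2, blue) and an error (severity 3, red).
    fractional = abs((c_val - f_val) / f_val) if f_val != 0.0 else 1.0
    return 2 if fractional < warning_threshold else 3

# Hypothetical examples, with max_f_log = max_c_log = 0.0 (values of order 1 on the line):
print(classify('1.234567E-09', '1.234568E-09', 0.0, 0.0))  # 1: numerical noise
print(classify('1.000000E+00', '1.000100E+00', 0.0, 0.0))  # 2: warning (0.0001 < 0.005)
print(classify('1.000000E+00', '1.100000E+00', 0.0, 0.0))  # 3: error (0.1 >= 0.005)

Per the script's own help text, a full comparison is run by passing --ffile and --cfile pointing at the FORTRAN and C++ outputs, optionally with --html to produce the color-coded report described above.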