Skip to content

Commit

Permalink
Merge pull request #205 from DimitriPapadopoulos/f-strings
Browse files Browse the repository at this point in the history
Use f-strings where possible
  • Loading branch information
jjhelmus authored Nov 15, 2023
2 parents 09d9b75 + 8728b50 commit 5dd3ab9
Show file tree
Hide file tree
Showing 13 changed files with 59 additions and 61 deletions.
2 changes: 1 addition & 1 deletion examples/fitting_data/t1_measurements/pt.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,5 +40,5 @@ def fit_func(p, x):
ax.set_title(peak)

# save the figure
fig.savefig(peak + "_plot.png")
fig.savefig(f"{peak}_plot.png")
plt.close()
2 changes: 1 addition & 1 deletion examples/plotting/plot_2d/plot_boxes.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,5 +56,5 @@
ax.set_title(name)

# save the figure
fig.savefig(name + ".png")
fig.savefig(f"{name}.png")
del(fig)
2 changes: 1 addition & 1 deletion examples/plotting/plot_sparky_savefile/plot_sparky.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def plot(dic, data, outfile=None, xlims=[], ylims=[]):
# save figure
if outfile is None:
outfile = "plot"
fig.savefig(outfile + ".png")
fig.savefig(f"{outfile}.png")

return

Expand Down
3 changes: 1 addition & 2 deletions nmrglue/analysis/linesh.py
Original file line number Diff line number Diff line change
Expand Up @@ -496,8 +496,7 @@ def fit_NDregion(region, lineshapes, params, amps, bounds=None,
if wmask is None: # default is to include all points in region
wmask = np.ones(shape, dtype='bool')
if wmask.shape != shape:
err = "wmask has incorrect shape:" + str(wmask.shape) + \
" should be " + str(shape)
err = f"wmask has incorrect shape: {wmask.shape} should be {shape}"
raise ValueError(err)

# DEBUGGING
Expand Down
4 changes: 2 additions & 2 deletions nmrglue/analysis/peakpick.py
Original file line number Diff line number Diff line change
Expand Up @@ -326,13 +326,13 @@ def pack_table(locations, cluster_ids=None, scales=None, amps=None,
ndim = len(locations[0])
anames = axis_names[-ndim:]

dt = [(a + "_AXIS", float) for a in anames]
dt = [(f"{a}_AXIS", float) for a in anames]
rec = np.rec.array(locations, dtype=dt)

if cluster_ids is not None:
rec = table.append_column(rec, cluster_ids, 'cID', 'int')
if scales is not None:
names = [a + "_LW" for a in anames]
names = [f"{a}_LW" for a in anames]
for n, c in zip(names, np.array(scales).T):
rec = table.append_column(rec, c, n, 'float')
if amps is not None:
Expand Down
48 changes: 24 additions & 24 deletions nmrglue/fileio/bruker.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,8 +111,8 @@ def add_axis_to_udic(udic, dic, udim, strip_fake):
"""
# This could still use some work
b_dim = udic['ndim'] - udim - 1 # last dim
acq_file = "acqu" + str(b_dim + 1) + "s"
pro_file = "proc" + str(b_dim + 1) + "s"
acq_file = f"acqu{b_dim + 1}s"
pro_file = f"proc{b_dim + 1}s"

# Because they're inconsistent,..
if acq_file == "acqu1s":
Expand Down Expand Up @@ -1570,7 +1570,7 @@ def read_binary(filename, shape=(1), cplex=True, big=True, isfloat=False):
return dic, data.reshape(shape)

except ValueError:
warn(str(data.shape) + "cannot be shaped into" + str(shape))
warn(f"{data.shape} cannot be shaped into {shape}")
return dic, data


Expand Down Expand Up @@ -2193,9 +2193,9 @@ def read_jcamp(filename, encoding=locale.getpreferredencoding()):
key, value = parse_jcamp_line(line, f)
dic[key] = value
except:
warn("Unable to correctly parse line:" + line)
warn(f"Unable to correctly parse line: {line}")
else:
warn("Extraneous line:" + line)
warn(f"Extraneous line: {line}")

return dic

Expand All @@ -2216,7 +2216,7 @@ def parse_jcamp_line(line, f):

if "<" in text: # string
while ">" not in text: # grab additional text until ">" in string
text = text + "\n" + f.readline().rstrip()
text += "\n" + f.readline().rstrip()
value = text[1:-1] # remove < and >

elif "(" in text: # array
Expand Down Expand Up @@ -2341,24 +2341,24 @@ def write_jcamp_pair(f, key, value):
"""

# the parameter name and such
line = "##$" + key + "= "
line = f"##${key}= "

# need to be type not isinstance since isinstance(bool, int) == True
if type(value) == float or type(value) == int: # simple numbers
line = line + repr(value)

elif isinstance(value, str): # string
line = line + "<" + value + ">"

elif type(value) == bool: # yes or no
# need to be checked first since isinstance(bool, int) == True
if isinstance(value, bool): # yes or no
if value:
line = line + "yes"
line += "yes"
else:
line = line + "no"
line += "no"

elif isinstance(value, (float, int)): # simple numbers
line += repr(value)

elif isinstance(value, str): # string
line += f"<{value}>"

elif isinstance(value, list):
# write out the current line
line = line + "(0.." + repr(len(value) - 1) + ")"
line += f"(0..{len(value) - 1!r})"
f.write(line)
f.write("\n")
line = ""
Expand All @@ -2373,7 +2373,7 @@ def write_jcamp_pair(f, key, value):
line = ""

if isinstance(v, str):
to_add = "<" + v + ">"
to_add = f"<{v}>"
else:
to_add = repr(v)

Expand All @@ -2383,7 +2383,7 @@ def write_jcamp_pair(f, key, value):
line = ""

if line != "":
line = line + to_add + " "
line += to_add + " "
else:
line = to_add + " "

Expand Down Expand Up @@ -2582,20 +2582,20 @@ def write_pprog(filename, dic, overwrite=False):

# write out the variables
for k, v in dic["var"].items():
f.write("\"" + k + "=" + v + "\"\n")
f.write(f'"{k}={v}"\n')

# write out each loop
for i, steps in enumerate(dic["loop"]):

# write out the increments
for v in dic["incr"][i]:
f.write("d01 id" + str(v) + "\n")
f.write(f"d01 id{v}\n")

# write out the phases
for v, w in zip(dic["phase"][i], dic["ph_extra"][i]):
f.write("d01 ip" + str(v) + " " + str(w) + "\n")
f.write(f"d01 ip{v} {w}\n")

f.write("lo to 0 times " + str(steps) + "\n")
f.write(f"lo to 0 times {steps}\n")

# close the file
f.close()
Expand Down
2 changes: 1 addition & 1 deletion nmrglue/fileio/convert.py
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,7 @@ def to_csdm(self):
label=value["label"],
)
for key, value in list(self._udic.items())
if type(key) == int and value["size"] != 1
if type(key) is int and value["size"] != 1
]

return cp.CSDM(
Expand Down
2 changes: 1 addition & 1 deletion nmrglue/fileio/glue.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ def put_dic(f, dic, dataset="spectrum"):
for key, value in dic.items():

# axis dictionaries
if type(key) == int and type(value) == dict:
if type(key) is int and type(value) is dict:
axis = key
for axiskey, axisvalue in value.items():
fullkey = str(axis) + "_" + axiskey
Expand Down
15 changes: 7 additions & 8 deletions nmrglue/fileio/jcampdx.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def _parsejcampdx(filename):
# for multi-line data, linebreak must be restored if it has been
# cut out with comments:
if actual[-1] != "\n":
actual = actual + "\n"
actual += "\n"

# encountered new key:
if actual[:2] == "##":
Expand All @@ -85,7 +85,7 @@ def _parsejcampdx(filename):
key = _getkey(currentkey)
value = "".join(currentvaluestrings) # collapse
if not value.strip():
warn("JCAMP-DX key without value:" + key)
warn(f"JCAMP-DX key without value: {key}")
else:
try:
activeblock[key].append(value)
Expand Down Expand Up @@ -121,7 +121,7 @@ def _parsejcampdx(filename):
keystr = keysplit[0][2:] # remove "##" already here
valuestr = keysplit[1]
if not keystr:
warn("Empty key in JCAMP-DX line:" + line)
warn(f"Empty key in JCAMP-DX line: {line}")
currentkey = None
currentvaluestrings = []
continue
Expand All @@ -134,7 +134,7 @@ def _parsejcampdx(filename):
else:
if activeblock:
if currentkey is None:
warn("JCAMP-DX data line without associated key:" + line)
warn(f"JCAMP-DX data line without associated key: {line}")
continue

currentvaluestrings.append(commentsplit[0])
Expand Down Expand Up @@ -272,7 +272,7 @@ def _parse_affn_pac(datalines):
try:
value = float(base + (exp if exp is not None else ""))
except ValueError:
warn("Data parsing failed at line:" + dataline)
warn(f"Data parsing failed at line: {dataline}")
return None
linedata.append(value)
if len(linedata) > 1:
Expand Down Expand Up @@ -375,8 +375,7 @@ def _parse_pseudo(datalines):
valuechar = _DUP_DIGITS[char]
newmode = 3
except KeyError:
warn("Unknown pseudo-digit: " +
char + " at line: " + dataline)
warn(f"Unknown pseudo-digit: {char} at line: {dataline}")
return None

# finish previous number
Expand All @@ -399,7 +398,7 @@ def _parse_pseudo(datalines):
value_to_append,
data)
if not success:
warn("Data parsing failed at line:" + dataline)
warn(f"Data parsing failed at line: {dataline}")
return None

# in DIF mode last of line is same than the first of next line
Expand Down
2 changes: 1 addition & 1 deletion nmrglue/fileio/rnmrtk.py
Original file line number Diff line number Diff line change
Expand Up @@ -738,7 +738,7 @@ def write_par(par_file, dic, overwrite):
f = fileiobase.open_towrite(par_file, overwrite, mode='w')

# write comment line
f.write('Comment \'' + dic['comment'] + '\'\n')
f.write("Comment '" + dic['comment'] + "'\n")

# Dom line, set from layout
l = "Dom " + " ".join(dic['layout'][1])
Expand Down
4 changes: 2 additions & 2 deletions nmrglue/fileio/table.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def pipe2glue(pcomments, pformat, rec):
"""
# add a "#" to the list of comments and we are done
comments = ["# " + c for c in pcomments]
comments = [f"# {c}" for c in pcomments]
return comments, rec


Expand Down Expand Up @@ -62,7 +62,7 @@ def glue2pipe(comments, rec):
"""
# add REMARK to each comment
pcomments = ["REMARK " + c for c in comments]
pcomments = [f"REMARK {c}" for c in comments]

# guess the pipe format strings
pformat = [guess_pformat(rec[t]) for t in rec.dtype.names]
Expand Down
26 changes: 13 additions & 13 deletions nmrglue/fileio/varian.py
Original file line number Diff line number Diff line change
Expand Up @@ -504,8 +504,8 @@ def find_torder(dic, shape):
warn("missing phase order, torder set to 'r'")
return 'r'

warn("No trace ordering for" + str(ndim) +
"dimensional data, torder set to 'r'")
warn(f"No trace ordering for {ndim} dimensional data, "
     f"torder set to 'r'")
return 'r'


Expand All @@ -523,7 +523,7 @@ def torder2i2t(torder):
elif torder in ('regular', 'r'):
return fileiobase.index2trace_reg
else:
raise ValueError("unknown torder" + str(torder))
raise ValueError(f"unknown torder {torder}")


def torder2t2i(torder):
Expand All @@ -539,7 +539,7 @@ def torder2t2i(torder):
elif torder in ('regular', 'r'):
return fileiobase.trace2index_reg
else:
raise ValueError("unknown torder" + str(torder))
raise ValueError(f"unknown torder {torder}")


def reorder_data(data, shape, torder):
Expand Down Expand Up @@ -571,7 +571,7 @@ def reorder_data(data, shape, torder):
try:
data = data.reshape(shape)
except ValueError:
warn(str(data.shape) + "cannot be shaped into" + str(shape))
warn(f"{data.shape} cannot be shaped into {shape}")
return data

# all other cases
Expand Down Expand Up @@ -716,14 +716,14 @@ def read_fid(filename, shape=None, torder='flat', as_2d=False,
try:
return dic, reorder_data(data, shape, torder)
except:
warn("data cannot be re-ordered, returning raw 2D data\n" +
"Provided shape: " + str(shape) + " torder: " + str(torder))
warn(f"data cannot be re-ordered, returning raw 2D data\n" +
f"Provided shape: {shape} torder: {torder}")
return dic, data

try:
data = data.reshape(shape)
except ValueError:
warn(str(data.shape) + "cannot be shaped into" + str(shape))
warn(f"{data.shape} cannot be shaped into {shape}")
return dic, data

return dic, data
Expand Down Expand Up @@ -858,7 +858,7 @@ def read_fid_ntraces(filename, shape=None, torder='flat', as_2d=False,
try:
data = data.reshape(shape)
except ValueError:
warn(str(data.shape) + "cannot be shaped into" + str(shape))
warn(f"{data.shape} cannot be shaped into {shape}")
return dic, data

return dic, data
Expand Down Expand Up @@ -942,7 +942,7 @@ def write_fid(filename, dic, data, torder='flat', repack=False, correct=True,
else: # create a generic blockheader
bh = dic2blockheader(make_blockheader(dic, 1))
for i in range(data.shape[0]):
bh[2] = int(i + 1)
bh[2] = i + 1
trace = np.array(interleave_data(data[i]), dtype=dt)
put_block(f, trace, dic["nbheaders"], bh)

Expand Down Expand Up @@ -1027,7 +1027,7 @@ def write_fid_lowmem(filename, dic, data, torder='f', repack=False,
else: # create a generic blockheader
bh = dic2blockheader(make_blockheader(dic, 1))
for ntrace in range(nblocks):
bh[2] = int(ntrace + 1)
bh[2] = ntrace + 1
tup = t2i(data.shape[:-1], ntrace)
trace = np.array(interleave_data(data[tup]), dtype=dt)
put_block(f, trace, dic["nbheaders"], bh)
Expand Down Expand Up @@ -1949,7 +1949,7 @@ def write_procpar(filename, dic, overwrite=False):
print(len(d["values"]), end=' ', file=f) # don't end the line
for value in d["values"]:
# now end the line (for each string)
print('"' + value + '"', file=f)
print(f'"{value}"', file=f)

# print out the last line
print(d["enumerable"], end=' ', file=f)
Expand All @@ -1959,7 +1959,7 @@ def write_procpar(filename, dic, overwrite=False):
if d["basictype"] == "1": # reals
print(e, end=' ', file=f)
elif d["basictype"] == "2": # strings
print('"' + e + '"', end=' ', file=f)
print(f'"{e}"', end=' ', file=f)
print("", file=f) # end the enumerable line

f.close()
Expand Down
Loading

0 comments on commit 5dd3ab9

Please sign in to comment.