Merge pull request #225 from pydata/pre-commit-ci-update-config
[pre-commit.ci] pre-commit autoupdate
bashtage authored Jan 27, 2025
2 parents bd65623 + e4dbb49 commit 0bd9f41
Showing 12 changed files with 33 additions and 41 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:
exclude: ".ipynb"

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.6
rev: v0.9.3
hooks:
- id: ruff-format
types_or: [ python, pyi, jupyter ]
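
The ruff bump from v0.8.6 to v0.9.3 drives every other hunk in this commit: ruff-format 0.9 switches to its 2025 style, which (among other changes) joins implicitly concatenated string literals into a single literal whenever the result fits on one line. The Python changes below are that mechanical rewrite applied to patsy's error messages; nothing changes at runtime, since adjacent literals were already concatenated by the interpreter. A minimal sketch of the pattern, using the message from the patsy/build.py hunk below:

    # Before: two adjacent literals, implicitly concatenated by Python at compile time
    old = "can't build a design matrix " "containing missing values"

    # After ruff-format 0.9: the same message as a single literal
    new = "can't build a design matrix containing missing values"

    assert old == new  # the reformat is purely cosmetic
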
6 changes: 2 additions & 4 deletions patsy/build.py
@@ -285,7 +285,7 @@ def _build_subterm(subterm, factor_infos, factor_values, out):
contrast = subterm.contrast_matrices[factor]
if np.any(factor_values[factor] < 0):
raise PatsyError(
"can't build a design matrix " "containing missing values",
"can't build a design matrix containing missing values",
factor,
)
out[:, i] *= contrast.matrix[factor_values[factor], column_idx]
@@ -929,9 +929,7 @@ def build_design_matrices(
if isinstance(NA_action, str):
NA_action = NAAction(NA_action)
if return_type == "dataframe" and not have_pandas:
raise PatsyError(
"pandas.DataFrame was requested, but pandas " "is not installed"
)
raise PatsyError("pandas.DataFrame was requested, but pandas is not installed")
if return_type not in ("matrix", "dataframe"):
raise PatsyError(
"unrecognized output type %r, should be "
2 changes: 1 addition & 1 deletion patsy/categorical.py
@@ -354,7 +354,7 @@ def categorical_to_int(data, levels, NA_action, origin=None):
level_to_int = dict(zip(levels, range(len(levels))))
except TypeError:
raise PatsyError(
"Error interpreting categorical data: " "all items must be hashable", origin
"Error interpreting categorical data: all items must be hashable", origin
)

# fastpath to avoid doing an item-by-item iteration over boolean arrays,
4 changes: 2 additions & 2 deletions patsy/constraint.py
@@ -314,7 +314,7 @@ def _eval_binary_div(self, tree):
right = self.eval(tree.args[1])
if not self.is_constant(right):
raise PatsyError(
"Can't divide by a variable in a linear " "constraint", tree.args[1]
"Can't divide by a variable in a linear constraint", tree.args[1]
)
return left / right[-1]

@@ -327,7 +327,7 @@ def _eval_binary_multiply(self, tree):
return left * right[-1]
else:
raise PatsyError(
"Can't multiply one variable by another " "in a linear constraint", tree
"Can't multiply one variable by another in a linear constraint", tree
)

def _eval_binary_eq(self, tree):
8 changes: 4 additions & 4 deletions patsy/desc.py
@@ -298,7 +298,7 @@ def _eval_binary_minus(evaluator, tree):
def _check_interactable(expr):
if expr.intercept:
raise PatsyError(
"intercept term cannot interact with " "anything else",
"intercept term cannot interact with anything else",
expr.intercept_origin,
)

@@ -392,7 +392,7 @@ def _eval_one(evaluator, tree):


def _eval_number(evaluator, tree):
raise PatsyError("numbers besides '0' and '1' are " "only allowed with **", tree)
raise PatsyError("numbers besides '0' and '1' are only allowed with **", tree)


def _eval_python_expr(evaluator, tree):
@@ -437,14 +437,14 @@ def eval(self, tree, require_evalexpr=True):
key = (tree.type, len(tree.args))
if key not in self._evaluators:
raise PatsyError(
"I don't know how to evaluate this " "'%s' operator" % (tree.type,),
"I don't know how to evaluate this '%s' operator" % (tree.type,),
tree.token,
)
result = self._evaluators[key](self, tree)
if require_evalexpr and not isinstance(result, IntermediateExpr):
if isinstance(result, ModelDesc):
raise PatsyError(
"~ can only be used once, and " "only at the top level", tree
"~ can only be used once, and only at the top level", tree
)
else:
raise PatsyError(
20 changes: 8 additions & 12 deletions patsy/design_info.py
@@ -88,14 +88,14 @@ def __init__(self, factor, type, state, num_columns=None, categories=None):
if self.type == "numerical":
if not isinstance(num_columns, int):
raise ValueError(
"For numerical factors, num_columns " "must be an integer"
"For numerical factors, num_columns must be an integer"
)
if categories is not None:
raise ValueError("For numerical factors, categories " "must be None")
raise ValueError("For numerical factors, categories must be None")
else:
assert self.type == "categorical"
if num_columns is not None:
raise ValueError("For categorical factors, num_columns " "must be None")
raise ValueError("For categorical factors, num_columns must be None")
categories = tuple(categories)
self.num_columns = num_columns
self.categories = categories
@@ -280,8 +280,7 @@ def __init__(self, column_names, factor_infos=None, term_codings=None):

if (factor_infos is None) != (term_codings is None):
raise ValueError(
"Must specify either both or neither of "
"factor_infos= and term_codings="
"Must specify either both or neither of factor_infos= and term_codings="
)

self.factor_infos = factor_infos
@@ -304,17 +303,15 @@ def __init__(self, column_names, factor_infos=None, term_codings=None):
term_factors = set(term.factors)
for subterm in subterms:
if not isinstance(subterm, SubtermInfo):
raise ValueError("expected SubtermInfo, " "not %r" % (subterm,))
raise ValueError("expected SubtermInfo, not %r" % (subterm,))
if not term_factors.issuperset(subterm.factors):
raise ValueError("unexpected factors in subterm")

all_factors = set()
for term in self.term_codings:
all_factors.update(term.factors)
if all_factors != set(self.factor_infos):
raise ValueError(
"Provided Term objects and factor_infos " "do not match"
)
raise ValueError("Provided Term objects and factor_infos do not match")
for factor, factor_info in self.factor_infos.items():
if not isinstance(factor_info, FactorInfo):
raise ValueError(
@@ -343,8 +340,7 @@ def __init__(self, column_names, factor_infos=None, term_codings=None):
exp_cols *= cm.shape[1]
if cat_factors != set(subterm.contrast_matrices):
raise ValueError(
"Mismatch between contrast_matrices "
"and categorical factors"
"Mismatch between contrast_matrices and categorical factors"
)
if exp_cols != subterm.num_columns:
raise ValueError("Unexpected num_columns")
@@ -368,7 +364,7 @@ def __init__(self, column_names, factor_infos=None, term_codings=None):
idx += term_columns
if idx != len(self.column_names):
raise ValueError(
"mismatch between column_names and columns " "coded by given terms"
"mismatch between column_names and columns coded by given terms"
)
self.term_name_slices = OrderedDict(
[(term.name(), slice_) for (term, slice_) in self.term_slices.items()]
2 changes: 1 addition & 1 deletion patsy/eval.py
@@ -526,7 +526,7 @@ def new_name_maker(token):
# original code
if has_bare_variable_reference(state["transforms"], self.code):
raise PatsyError(
"names of this form are reserved for " "internal use (%s)" % (token,),
"names of this form are reserved for internal use (%s)" % (token,),
token.origin,
)
# Pull out all the '_patsy_stobj0__center__.transform(x)' pieces
10 changes: 4 additions & 6 deletions patsy/highlevel.py
@@ -107,7 +107,7 @@ def iter_maker():
raise PatsyError("bad formula-like object")
if len(design_infos[0].column_names) > 0:
raise PatsyError(
"encountered outcome variables for a model " "that does not expect them"
"encountered outcome variables for a model that does not expect them"
)
return design_infos[1]

@@ -149,9 +149,7 @@ def incr_dbuilders(formula_like, data_iter_maker, eval_env=0, NA_action="drop"):
# any object with a special method __patsy_get_model_desc__
def _do_highlevel_design(formula_like, data, eval_env, NA_action, return_type):
if return_type == "dataframe" and not have_pandas:
raise PatsyError(
"pandas.DataFrame was requested, but pandas " "is not installed"
)
raise PatsyError("pandas.DataFrame was requested, but pandas is not installed")
if return_type not in ("matrix", "dataframe"):
raise PatsyError(
"unrecognized output type %r, should be "
@@ -219,7 +217,7 @@ def _regularize_matrix(m, default_column_prefix):
if rhs_orig_index is not None and lhs_orig_index is not None:
if not rhs_orig_index.equals(lhs_orig_index):
raise PatsyError(
"index mismatch: outcome and " "predictor have incompatible indexes"
"index mismatch: outcome and predictor have incompatible indexes"
)
if return_type == "dataframe":
if rhs_orig_index is not None and lhs_orig_index is None:
@@ -298,7 +296,7 @@ def dmatrix(formula_like, data={}, eval_env=0, NA_action="drop", return_type="ma
)
if lhs.shape[1] != 0:
raise PatsyError(
"encountered outcome variables for a model " "that does not expect them"
"encountered outcome variables for a model that does not expect them"
)
return rhs

14 changes: 7 additions & 7 deletions patsy/mgcv_cubic_splines.py
@@ -161,7 +161,7 @@ def _row_tensor_product(dms):
for dm in dms:
if dm.shape[0] != tp_nrows:
raise ValueError(
"Tensor product arguments should have " "same number of rows."
"Tensor product arguments should have same number of rows."
)
tp_ncols *= dm.shape[1]
tp = np.zeros((tp_nrows, tp_ncols))
@@ -624,7 +624,7 @@ def memorize_chunk(
x = x[:, 0]
if x.ndim > 1:
raise ValueError(
"Input to %r must be 1-d, " "or a 2-d column vector." % (self._name,)
"Input to %r must be 1-d, or a 2-d column vector." % (self._name,)
)

self._tmp.setdefault("xs", []).append(x)
@@ -649,7 +649,7 @@ def memorize_finish(self):
else:
constraints = np.atleast_2d(constraints)
if constraints.ndim != 2:
raise ValueError("Constraints must be 2-d array or " "1-d vector.")
raise ValueError("Constraints must be 2-d array or 1-d vector.")
n_constraints = constraints.shape[0]

n_inner_knots = None
@@ -704,7 +704,7 @@ def transform(
x = x[:, 0]
if x.ndim > 1:
raise ValueError(
"Input to %r must be 1-d, " "or a 2-d column vector." % (self._name,)
"Input to %r must be 1-d, or a 2-d column vector." % (self._name,)
)
dm = _get_crs_dmatrix(
x, self._all_knots, self._constraints, cyclic=self._cyclic
@@ -982,7 +982,7 @@ def memorize_finish(self):
else:
constraints = np.atleast_2d(constraints)
if constraints.ndim != 2:
raise ValueError("Constraints must be 2-d array or " "1-d vector.")
raise ValueError("Constraints must be 2-d array or 1-d vector.")

self._constraints = constraints

@@ -992,7 +992,7 @@ def transform(self, *args, **kwargs):
arg = atleast_2d_column_default(arg)
if arg.ndim != 2:
raise ValueError(
"Each tensor product argument must be " "a 2-d array or 1-d vector."
"Each tensor product argument must be a 2-d array or 1-d vector."
)
args_2d.append(arg)

@@ -1190,7 +1190,7 @@ def test_te_2smooths():
assert np.allclose(dmatrix_nocons, dmatrix_R_nocons, rtol=1e-12, atol=0.0)

builder = incr_dbuilder(
"te(cr(x1, df=5), cc(x2, df=6), " "constraints='center') - 1",
"te(cr(x1, df=5), cc(x2, df=6), constraints='center') - 1",
lambda: iter(data_chunked),
)
dmatrix_cons = build_design_matrices([builder], new_data)[0]
2 changes: 1 addition & 1 deletion patsy/parse_formula.py
@@ -69,7 +69,7 @@ def _read_python_expr(it, end_tokens):
return Token(token_type, Origin.combine(origins), extra=expr_text)
else:
raise PatsyError(
"unclosed bracket in embedded Python " "expression", Origin.combine(origins)
"unclosed bracket in embedded Python expression", Origin.combine(origins)
)


2 changes: 1 addition & 1 deletion patsy/splines.py
@@ -171,7 +171,7 @@ def memorize_chunk(
if x.ndim == 2 and x.shape[1] == 1:
x = x[:, 0]
if x.ndim > 1:
raise ValueError("input to 'bs' must be 1-d, " "or a 2-d column vector")
raise ValueError("input to 'bs' must be 1-d, or a 2-d column vector")
# There's no better way to compute exact quantiles than memorizing
# all data.
self._tmp.setdefault("xs", []).append(x)
2 changes: 1 addition & 1 deletion patsy/tokens.py
@@ -38,7 +38,7 @@ def python_tokenize(code):
origin = Origin(code, start, end)
if pytype == tokenize.ERRORTOKEN:
raise PatsyError(
"error tokenizing input " "(maybe an unclosed string?)", origin
"error tokenizing input (maybe an unclosed string?)", origin
)
if pytype == tokenize.COMMENT:
raise PatsyError("comments are not allowed", origin)
