diff --git a/docs/src/examples.md b/docs/src/examples.md index 8aaa828d..16d27c46 100644 --- a/docs/src/examples.md +++ b/docs/src/examples.md @@ -229,7 +229,7 @@ h,i,j # note this isn't required, but can be convenient in certain cases file = CSV.File(IOBuffer(data); normalizenames=true) -# we can acces the first column like +# we can access the first column like file._1 # another example where we may want to normalize is column names with spaces in them @@ -491,7 +491,7 @@ using CSV # In this data, we have a few "quoted" fields, which means the field's value starts and ends with `quotechar` (or # `openquotechar` and `closequotechar`, respectively). Quoted fields allow the field to contain characters that would otherwise # be significant to parsing, such as delimiters or newline characters. When quoted, parsing will ignore these otherwise -# signficant characters until the closing quote character is found. For quoted fields that need to also include the quote +# significant characters until the closing quote character is found. For quoted fields that need to also include the quote # character itself, an escape character is provided to tell parsing to ignore the next character when looking for a close quote # character. In the syntax examples, the keyword arguments are passed explicitly, but these also happen to be the default # values, so just doing `CSV.File(IOBuffer(data))` would result in successful parsing. diff --git a/docs/src/reading.md b/docs/src/reading.md index 66e5e659..d12a646c 100644 --- a/docs/src/reading.md +++ b/docs/src/reading.md @@ -23,7 +23,7 @@ Any delimited input is ultimately converted to a byte buffer (`Vector{UInt8}`) f ## [`header`](@id header) -The `header` keyword argument controls how column names are treated when processing files. By default, it is assumed that the column names are the first row/line of the input, i.e. `header=1`. 
Alternative valid aguments for `header` include: +The `header` keyword argument controls how column names are treated when processing files. By default, it is assumed that the column names are the first row/line of the input, i.e. `header=1`. Alternative valid arguments for `header` include: * `Integer`, e.g. `header=2`: provide the row number as an `Integer` where the column names can be found * `Bool`, e.g. `header=false`: no column names exist in the data; column names will be auto-generated depending on the # of columns, like `Column1`, `Column2`, etc. * `Vector{String}` or `Vector{Symbol}`: manually provide column names as strings or symbols; should match the # of columns in the data. A copy of the `Vector` will be made and converted to `Vector{Symbol}` @@ -79,7 +79,7 @@ This argument specifies whether "empty rows", where consecutive [newlines](@ref ## [`select` / `drop`](@id select) -Arguments that control which columns from the input data will actually be parsed and available after processing. `select` controls which columns _will_ be accessible after parsing while `drop` controls which columns to _ignore_. Either argument can be provided as a vector of `Integer`, `String`, or `Symbol`, specifing the column numbers or names to include/exclude. A vector of `Bool` matching the number of columns in the input data can also be provided, where each element specifies whether the corresponding column should be included/excluded. Finally, these arguments can also be given as boolean functions, of the form `(i, name) -> Bool`, where each column number and name will be given as arguments and the result of the function will determine if the column will be included/excluded. +Arguments that control which columns from the input data will actually be parsed and available after processing. `select` controls which columns _will_ be accessible after parsing while `drop` controls which columns to _ignore_. 
Either argument can be provided as a vector of `Integer`, `String`, or `Symbol`, specifying the column numbers or names to include/exclude. A vector of `Bool` matching the number of columns in the input data can also be provided, where each element specifies whether the corresponding column should be included/excluded. Finally, these arguments can also be given as boolean functions, of the form `(i, name) -> Bool`, where each column number and name will be given as arguments and the result of the function will determine if the column will be included/excluded. ### Examples * [Including/excluding columns](@ref select_example) diff --git a/src/file.jl b/src/file.jl index 66632471..5907ec9d 100644 --- a/src/file.jl +++ b/src/file.jl @@ -566,7 +566,7 @@ function parsefilechunk!(ctx::Context, pos, len, rowsguess, rowoffset, columns, end if !ctx.threaded && ctx.ntasks > 1 && !ctx.silencewarnings # !ctx.threaded && ctx.ntasks > 1 indicate that multithreaded parsing failed. - # Thes messages echo the corresponding debug statement in the definition of ctx + # These messages echo the corresponding debug statement in the definition of ctx if numwarnings[] > 0 @warn "Multithreaded parsing failed and fell back to single-threaded parsing, check previous warnings for possible reasons." else diff --git a/test/testfiles/test_one_row_of_data.cscv b/test/testfiles/test_one_row_of_data.cscv deleted file mode 100644 index 3fde4e20..00000000 --- a/test/testfiles/test_one_row_of_data.cscv +++ /dev/null @@ -1 +0,0 @@ -1,2,3 \ No newline at end of file diff --git a/test/write.jl b/test/write.jl index 392d540d..314e18a7 100644 --- a/test/write.jl +++ b/test/write.jl @@ -342,7 +342,7 @@ end CSV.write(io, Tuple[(1,), (2,)], header=false) @test String(take!(io)) == "1\n2\n" - # parition writing + # partition writing io = IOBuffer() io2 = IOBuffer() CSV.write([io, io2], Tables.partitioner((default_table, default_table)); partition=true)