Skip to content

Commit

Permalink
Merge pull request #597 from gridap/shifted_nabla
Browse files Browse the repository at this point in the history
Adding new differential operators
  • Loading branch information
fverdugo authored May 25, 2021
2 parents 74622e5 + e3c116d commit 5a9bf57
Show file tree
Hide file tree
Showing 9 changed files with 195 additions and 6 deletions.
8 changes: 8 additions & 0 deletions src/CellData/CellFields.jl
Original file line number Diff line number Diff line change
Expand Up @@ -479,6 +479,14 @@ for op in (:inner,:outer,:double_contraction,:+,:-,:*,:cross,:dot,:/)
end
end

# Broadcasting a scalar operation over CellFields (or mixed CellField/Number/
# Function pairs) is lowered to an Operation that broadcasts f point-wise.
Base.broadcasted(f,a::CellField,b::CellField) = Operation((i,j)->f.(i,j))(a,b)
Base.broadcasted(f,a::Number,b::CellField) = Operation((i,j)->f.(i,j))(a,b)
Base.broadcasted(f,a::CellField,b::Number) = Operation((i,j)->f.(i,j))(a,b)
Base.broadcasted(f,a::Function,b::CellField) = Operation((i,j)->f.(i,j))(a,b)
Base.broadcasted(f,a::CellField,b::Function) = Operation((i,j)->f.(i,j))(a,b)
# ∇.*f extracts the diagonal of the gradient tensor of f. The gradient must be
# applied here (∇(f)); the field itself has no tensor-valued point values.
Base.broadcasted(::typeof(*),::typeof(∇),f::CellField) = Operation(Fields._extract_grad_diag)(∇(f))
# (∇+v).*f: same, but with the shifted-nabla gradient s(f) = ∇f + v⊗f.
Base.broadcasted(::typeof(*),s::Fields.ShiftedNabla,f::CellField) = Operation(Fields._extract_grad_diag)(s(f))

dot(::typeof(∇),f::CellField) = divergence(f)
function (*)(::typeof(∇),f::CellField)
msg = "Syntax ∇*f has been removed, use ∇⋅f (\\nabla \\cdot f) instead"
Expand Down
51 changes: 51 additions & 0 deletions src/Fields/DiffOperators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -116,3 +116,54 @@ Equivalent to
"""
cross(::typeof(∇),f::Field) = curl(f)
cross(::typeof(∇),f::Function) = curl(f)

# Diagonal entries of a gradient value. Only tensor-valued gradients are
# supported; any other value type is not implemented.
function _extract_grad_diag(t::TensorValue)
  diag(t)
end
_extract_grad_diag(t) = @notimplemented

# ∇.*f returns the diagonal of the gradient tensor of f.
# The gradient must be taken here; the extraction character for ∇ was lost in
# the previous `g = (f)`, which passed the un-differentiated field through.
function Base.broadcasted(::typeof(*),::typeof(∇),f)
  g = ∇(f)
  Operation(_extract_grad_diag)(g)
end

# Plain functions are lifted to GenericField before broadcasting with ∇.
Base.broadcasted(::typeof(*),::typeof(∇),f::Function) = Base.broadcasted(*,∇,GenericField(f))

# Represents the shifted differential operator (∇ + v) for a constant
# N-dimensional shift vector v; built via `∇ + v`, `v + ∇`, or `∇ - v`.
struct ShiftedNabla{N,T}
  v::VectorValue{N,T}
end

# Constructors for the shifted operator: ∇+v, v+∇, and ∇-v.
function (+)(::typeof(∇),v::VectorValue)
  ShiftedNabla(v)
end
function (+)(v::VectorValue,::typeof(∇))
  ShiftedNabla(v)
end
function (-)(::typeof(∇),v::VectorValue)
  ShiftedNabla(-v)
end

# Apply the shifted operator: (∇+v)(f) = ∇f + v⊗f.
# The outer-product character ⊗ was lost in the scraped `a+s.vb`.
function (s::ShiftedNabla)(f)
  Operation((a,b)->a+s.v⊗b)(gradient(f),f)
end

# Plain functions are lifted to GenericField before applying the operator.
function (s::ShiftedNabla)(f::Function)
  s(GenericField(f))
end

# Broadcasted application of a ShiftedNabla over arrays of fields:
# broadcast the gradient, then add the broadcasted shift term v⊗f.
# The outer-product character ⊗ was lost in the scraped `a+s.vb`.
function evaluate!(cache,k::Broadcasting{<:ShiftedNabla},f)
  s = k.f
  g = Broadcasting(∇)(f)
  Broadcasting(Operation((a,b)->a+s.v⊗b))(g,f)
end

# Differential operators built on the shifted gradient s(f) = ∇f + v⊗f:
# divergence, outer products, and curl.
dot(s::ShiftedNabla,f) = Operation(tr)(s(f))
outer(s::ShiftedNabla,f) = s(f)
# f⊗(∇+v) is the transpose of (∇+v)⊗f. NOTE(review): the previous
# `transpose(gradient(f))` dropped the shift term v⊗f — confirm against the
# un-shifted convention outer(f,∇) = transpose(gradient(f)).
outer(f,s::ShiftedNabla) = transpose(s(f))
cross(s::ShiftedNabla,f) = Operation(grad2curl)(s(f))

# Function arguments are lifted to GenericField for all shifted operators.
for op in (:dot,:cross)
  @eval ($op)(s::ShiftedNabla,f::Function) = ($op)(s,GenericField(f))
end
outer(s::ShiftedNabla,f::Function) = outer(s,GenericField(f))
outer(f::Function,s::ShiftedNabla) = outer(GenericField(f),s)

# (∇+v).*f extracts the diagonal of the shifted gradient tensor.
Base.broadcasted(::typeof(*),s::ShiftedNabla,f) = Operation(_extract_grad_diag)(s(f))

# Plain functions are first lifted to GenericField.
function Base.broadcasted(::typeof(*),s::ShiftedNabla,f::Function)
  g = GenericField(f)
  Base.broadcasted(*,s,g)
end
2 changes: 1 addition & 1 deletion src/Fields/Fields.jl
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ using Gridap.Algebra: fill_entries!

using Gridap.TensorValues

using LinearAlgebra: mul!, Transpose
using LinearAlgebra: mul!, Transpose, diag

using ForwardDiff
using FillArrays
Expand Down
17 changes: 13 additions & 4 deletions src/Fields/FieldsInterfaces.jl
Original file line number Diff line number Diff line change
Expand Up @@ -344,10 +344,15 @@ end

@inline transpose(f::Field) = f

@inline *(A::Number, B::Field) = ConstantField(A)*B
@inline *(A::Field, B::Number) = A*ConstantField(B)
@inline ⋅(A::Number, B::Field) = ConstantField(A)⋅B
@inline ⋅(A::Field, B::Number) = A⋅ConstantField(B)
# Mixed Field/Number binary ops: lift the Number to a ConstantField.
# The three stripped operator symbols are reconstructed as ⋅, ⊙, ⊗ —
# consistent with the commented-out ⋅ definitions below and the gradient
# rules defined for (:*,:⋅,:⊗,:⊙) further down this file.
for op in (:+,:-,:*,:⋅,:⊙,:⊗)
  @eval ($op)(a::Field,b::Number) = Operation($op)(a,ConstantField(b))
  @eval ($op)(a::Number,b::Field) = Operation($op)(ConstantField(a),b)
end

#@inline *(A::Number, B::Field) = ConstantField(A)*B
#@inline *(A::Field, B::Number) = A*ConstantField(B)
#@inline ⋅(A::Number, B::Field) = ConstantField(A)⋅B
#@inline ⋅(A::Field, B::Number) = A⋅ConstantField(B)

#@inline *(A::Function, B::Field) = GenericField(A)*B
#@inline *(A::Field, B::Function) = GenericField(B)*A
Expand Down Expand Up @@ -390,6 +395,10 @@ function product_rule(::typeof(⋅),f1::VectorValue,f2::VectorValue,∇f1,∇f2)
∇f1f2 + ∇f2f1
end

# Product rule for the dot product of a tensor field with a vector field:
# ∇(f1⋅f2) = ∇f1⋅f2 + ∇f2⋅transpose(f1).
# The ⋅ characters were stripped during extraction and are restored here
# (cf. the VectorValue⋅VectorValue rule just above).
function product_rule(::typeof(⋅),f1::TensorValue,f2::VectorValue,∇f1,∇f2)
  ∇f1⋅f2 + ∇f2⋅transpose(f1)
end

for op in (:*,:⋅,:⊗,:⊙)
@eval begin
function gradient(a::OperationField{typeof($op)})
Expand Down
33 changes: 33 additions & 0 deletions src/TensorValues/Operations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -598,6 +598,39 @@ transpose(a::SymTensorValue) = a
Meta.parse("SymTensorValue{D}($str)")
end

###############################################################
# diag
###############################################################

# Diagonal of a square tensor as a VectorValue, for any dimension D.
# TensorValue data is stored in column-major order, so the i-th diagonal
# entry lives at linear index (i-1)*D + i. This generalizes the previous
# hard-coded 1x1 / 2x2 / 3x3 methods without changing their results.
function LinearAlgebra.diag(a::TensorValue{D,D}) where D
  VectorValue(ntuple(i->a.data[(i-1)*D+i],Val(D)))
end

# Non-square tensors have no diagonal defined here.
function LinearAlgebra.diag(a::TensorValue)
  @notimplemented
end

###############################################################
# Broadcast
###############################################################
# TODO more cases need to be added

# Entrywise broadcasting between two values of the same kind: apply f to the
# underlying data tuples and rebuild the value.
Base.broadcasted(f,a::VectorValue,b::VectorValue) = VectorValue(map(f,a.data,b.data))
Base.broadcasted(f,a::TensorValue,b::TensorValue) = TensorValue(map(f,a.data,b.data))

###############################################################
# Define new operations for Gridap types
###############################################################
Expand Down
40 changes: 40 additions & 0 deletions test/CellDataTests/CellFieldsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,46 @@ test_array(∇vx,collect(∇vx))
∇fx = ∇(f)(x)
test_array(∇fx,collect(∇fx))


k = VectorValue(1.0,2.0)
∇kfx = ((∇+k)(f))(x)
test_array(∇kfx,collect(∇kfx))

∇kvx = ((∇+k)(v))(x)
test_array(∇kvx,collect(∇kvx))

β(x) = 2*x[1]
α = CellField(x->2*x,trian)
ax = ((∇+k)(β*α))(x)
test_array(ax,collect(ax))

ν = CellField(x->2*x,trian)
ax =((∇-k)ν)(x)
test_array(ax,collect(ax))

ax =((∇-k)×ν)(x)
test_array(ax,collect(ax))

ax =((∇-k)ν)(x)
test_array(ax,collect(ax))

ax =(∇.*ν)(x)
test_array(ax,collect(ax))

ax =.*ν)(x)
test_array(ax,collect(ax))

ax =((∇-k).*ν)(x)
test_array(ax,collect(ax))

ax =(∇-k))(x)
test_array(ax,collect(ax))

σ(x) = diagonal_tensor(VectorValue(1*x[1],2*x[2]))
Fields.gradient(::typeof(σ)) = x-> ThirdOrderTensorValue{2,2,2,Float64}(1,0,0,0,0,0,0,2)
ax = ((∇+k)(σα))(x)
test_array(ax,collect(ax))

h = Operation(*)(2,f)
hx = h(x)
test_array(hx,2*fx)
Expand Down
18 changes: 17 additions & 1 deletion test/FieldsTests/DiffOperatorsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ np = 4
p = Point(1,2)
x = fill(p,np)

v = 3.0
v = VectorValue(3.0,2.0)
f = MockField(v)

@test ∇(f) == gradient(f)
Expand All @@ -39,6 +39,20 @@ f = MockField(v)

@test Δ(f) ==(f)

@test (∇.*f)(x) != nothing

@test ((∇+p).*f)(x) != nothing

@test ((∇+p)(f))(x) == (∇(f) + p⊗f)(x)

g(x) = 2*x[2]

@test ((∇+p)(g))(x) == (∇(GenericField(g)) + p⊗GenericField(g))(x)

@test (∇+p)f != nothing
@test (∇+p)×f != nothing
@test (∇+p)f != nothing
@test f(∇+p) != nothing

l = 10
f = Fill(f,l)
Expand All @@ -47,6 +61,8 @@ f = Fill(f,l)
@test Broadcasting(curl)(f) == Broadcasting(Operation(grad2curl))(Broadcasting(∇)(f))
@test Broadcasting(ε)(f) == Broadcasting(Operation(symmetric_part))(Broadcasting(∇)(f))

@test evaluate(Broadcasting(∇+p)(f),x) == evaluate( Broadcasting(Operation((g,f)->g+p⊗f))(Broadcasting(∇)(f),f) ,x)

# Test automatic differentiation

u_scal(x) = x[1]^2 + x[2]
Expand Down
16 changes: 16 additions & 0 deletions test/FieldsTests/FieldInterfacesTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -250,6 +250,22 @@ test_field(f,z,f.(z),grad=∇(f).(z))
#c = return_cache(∇f,x)
#@btime evaluate!($c,$∇f,$x)

Tfun(x) = diagonal_tensor(VectorValue(1*x[1],2*x[2]))
bfun(x) = VectorValue(x[2],x[1])
Fields.gradient(::typeof(Tfun)) = x-> ThirdOrderTensorValue{2,2,2,Float64}(1,0,0,0,0,0,0,2)
a = GenericField(Tfun)
b = GenericField(bfun)

f = Operation(⋅)(a,b)
cp = Tfun(p)⋅bfun(p)
∇cp = ∇(Tfun)(p)⋅bfun(p) + ∇(bfun)(p)⋅transpose(Tfun(p))
test_field(f,p,cp)
test_field(f,p,cp,grad=∇cp)
test_field(f,x,f.(x))
test_field(f,x,f.(x),grad=(f).(x))
test_field(f,z,f.(z))
test_field(f,z,f.(z),grad=(f).(z))

afun(x) = x.+2
bfun(x) = 2*x

Expand Down
16 changes: 16 additions & 0 deletions test/TensorValuesTests/OperationsTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -688,4 +688,20 @@ b = 4.0 - 3.0*im
@test outer(a,b) == a*b
@test inner(a,b) == a*b

# Broadcast
a = VectorValue(1,2,3)
b = VectorValue(1.,2.,3.)
c = a .* b
@test isa(c,VectorValue)
@test c.data == map(*,a.data,b.data)

a = TensorValue(1,2,3,4)
b = TensorValue(1.,2.,3.,4.)
c = a .* b
@test isa(c,TensorValue)
@test c.data == map(*,a.data,b.data)

@test diag(a) == VectorValue(1,4)


end # module OperationsTests

0 comments on commit 5a9bf57

Please sign in to comment.