diff --git a/Project.toml b/Project.toml index 395d87c..8b41d40 100644 --- a/Project.toml +++ b/Project.toml @@ -6,8 +6,8 @@ version = "0.1.0" [deps] AccurateArithmetic = "22286c92-06ac-501d-9306-4abd417d9753" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" ComputableDAGs = "62933717-1c9d-4a3f-b06f-7ab7f17ca32d" +ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" QEDbase = "10e22c08-3ccb-4172-bfcf-7d7aa3d04d93" QEDcore = "35dc0263-cb5f-4c33-a114-1d7f54ab753e" QEDprocesses = "46de9c38-1bb3-4547-a1ec-da24d767fdad" @@ -17,6 +17,11 @@ RuntimeGeneratedFunctions = "7e49a35a-f44a-4d26-94aa-eba1b4ca6b47" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" TypeUtils = "c3b1956e-8857-4d84-9b79-890df85b1e67" +[compat] +ComputableDAGs = "0.1.1" +QEDbase = "0.3" +QEDcore = "0.2" + [extras] ComputableDAGs = "62933717-1c9d-4a3f-b06f-7ab7f17ca32d" QEDbase = "10e22c08-3ccb-4172-bfcf-7d7aa3d04d93" diff --git a/src/QEDFeynman.jl b/src/QEDFeynman.jl index e05d468..5855fde 100644 --- a/src/QEDFeynman.jl +++ b/src/QEDFeynman.jl @@ -16,7 +16,6 @@ using Base.Threads export ParticleValue export ParticleA, ParticleB, ParticleC export ABCParticle, GenericABCProcess, ABCModel, PerturbativeABC -export ComputeTaskABC_P export ComputeTaskABC_S1 export ComputeTaskABC_S2 export ComputeTaskABC_V @@ -28,13 +27,12 @@ export parse_dag # QED model export FeynmanDiagram, FeynmanVertex, FeynmanTie, FeynmanParticle export QEDModel -export ComputeTaskQED_P export ComputeTaskQED_S1 export ComputeTaskQED_S2 export ComputeTaskQED_V export ComputeTaskQED_U export ComputeTaskQED_Sum -export gen_graph +export graph export ParticleValue, ParticleValueSP diff --git a/src/abc/compute.jl b/src/abc/compute.jl index 442f966..d3f97bb 100644 --- a/src/abc/compute.jl +++ b/src/abc/compute.jl @@ -18,19 +18,6 @@ function ComputableDAGs.input_expr( ) end -""" - compute(::ComputeTaskABC_P, data::ParticleValue) - -Return the particle and value as is. - -0 FLOP. -""" -function ComputableDAGs.compute( - ::ComputeTaskABC_P, data::ParticleValue{P} -)::ParticleValue{P} where {P} - return data -end - """ compute(::ComputeTaskABC_U, data::ParticleValue) diff --git a/src/abc/parse.jl b/src/abc/parse.jl index 8436147..daa2734 100644 --- a/src/abc/parse.jl +++ b/src/abc/parse.jl @@ -93,19 +93,21 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B insert_edge!(graph, sum_node, global_data_out) # remember the data out nodes for connection - dataOutNodes = Dict() + data_out_nodes = Dict() if (verbose) println("Building graph") end - noNodes = 0 + number_of_nodes = 0 nodesToRead = length(nodes) while !isempty(nodes) node = popfirst!(nodes) - noNodes += 1 - if (noNodes % 100 == 0) + number_of_nodes += 1 + if (number_of_nodes % 100 == 0) if (verbose) - percent = string(round(100.0 * noNodes / nodesToRead; digits=2), "%") + percent = string( + round(100.0 * number_of_nodes / nodesToRead; digits=2), "%" + ) print("\rReading Nodes... 
$percent") end end @@ -114,18 +116,14 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B # add nodes and edges for the state reading to u(P(Particle)) data_in = insert_node!(graph, DataTask(PARTICLE_VALUE_SIZE), name) # read particle data node - compute_P = insert_node!(graph, ComputeTaskABC_P()) # compute P node - data_Pu = insert_node!(graph, DataTask(PARTICLE_VALUE_SIZE)) # transfer data from P to u (one ParticleValue object) compute_u = insert_node!(graph, ComputeTaskABC_U()) # compute U node data_out = insert_node!(graph, DataTask(PARTICLE_VALUE_SIZE)) # transfer data out from u (one ParticleValue object) - insert_edge!(graph, data_in, compute_P) - insert_edge!(graph, compute_P, data_Pu) - insert_edge!(graph, data_Pu, compute_u) + insert_edge!(graph, data_in, compute_u) insert_edge!(graph, compute_u, data_out) # remember the data_out node for future edges - dataOutNodes[node] = data_out + data_out_nodes[node] = data_out elseif occursin(regex_c, node) capt = match(regex_c, node) @@ -140,12 +138,12 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B compute_S = insert_node!(graph, ComputeTaskABC_S1()) data_S_v = insert_node!(graph, DataTask(PARTICLE_VALUE_SIZE)) - insert_edge!(graph, dataOutNodes[in1], compute_S) + insert_edge!(graph, data_out_nodes[in1], compute_S) insert_edge!(graph, compute_S, data_S_v) insert_edge!(graph, data_S_v, compute_v) else - insert_edge!(graph, dataOutNodes[in1], compute_v) + insert_edge!(graph, data_out_nodes[in1], compute_v) end if (occursin(regex_c, in2)) @@ -154,16 +152,16 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B compute_S = insert_node!(graph, ComputeTaskABC_S1()) data_S_v = insert_node!(graph, DataTask(PARTICLE_VALUE_SIZE)) - insert_edge!(graph, dataOutNodes[in2], compute_S) + insert_edge!(graph, data_out_nodes[in2], compute_S) insert_edge!(graph, compute_S, data_S_v) insert_edge!(graph, data_S_v, compute_v) else - insert_edge!(graph, dataOutNodes[in2], compute_v) + insert_edge!(graph, data_out_nodes[in2], compute_v) end insert_edge!(graph, compute_v, data_out) - dataOutNodes[node] = data_out + data_out_nodes[node] = data_out elseif occursin(regex_m, node) # assume for now that only the first particle of the three is combined and the other two are "original" ones @@ -176,8 +174,8 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B compute_v = insert_node!(graph, ComputeTaskABC_V()) data_v = insert_node!(graph, DataTask(PARTICLE_VALUE_SIZE)) - insert_edge!(graph, dataOutNodes[in2], compute_v) - insert_edge!(graph, dataOutNodes[in3], compute_v) + insert_edge!(graph, data_out_nodes[in2], compute_v) + insert_edge!(graph, data_out_nodes[in3], compute_v) insert_edge!(graph, compute_v, data_v) # combine with the v of the combined other input @@ -185,7 +183,7 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B data_out = insert_node!(graph, DataTask(FLOAT_SIZE)) # output of a S2 task is only a float insert_edge!(graph, data_v, compute_S2) - insert_edge!(graph, dataOutNodes[in1], compute_S2) + insert_edge!(graph, data_out_nodes[in1], compute_S2) insert_edge!(graph, compute_S2, data_out) insert_edge!(graph, data_out, sum_node) @@ -201,7 +199,7 @@ function parse_dag(filename::AbstractString, proc::GenericABCProcess, verbose::B end #put all nodes into dirty nodes set - graph.dirtyNodes = copy(graph.nodes) + graph.dirty_nodes = copy(graph.nodes) if (verbose) println("Generating the graph's 
properties") diff --git a/src/abc/properties.jl b/src/abc/properties.jl index 1e4dd15..9c91f2a 100644 --- a/src/abc/properties.jl +++ b/src/abc/properties.jl @@ -28,13 +28,6 @@ Return the compute effort of a V task. """ compute_effort(t::ComputeTaskABC_V)::Float64 = 6.0 -""" - compute_effort(t::ComputeTaskABC_P) - -Return the compute effort of a P task. -""" -compute_effort(t::ComputeTaskABC_P)::Float64 = 0.0 - """ compute_effort(t::ComputeTaskABC_Sum) @@ -59,13 +52,6 @@ Return the number of children of a ComputeTaskABC_S2 (always 2). """ children(::ComputeTaskABC_S2) = 2 -""" - children(::ComputeTaskABC_P) - -Return the number of children of a ComputeTaskABC_P (always 1). -""" -children(::ComputeTaskABC_P) = 1 - """ children(::ComputeTaskABC_U) diff --git a/src/abc/types.jl b/src/abc/types.jl index f5ecf54..35d2bb9 100644 --- a/src/abc/types.jl +++ b/src/abc/types.jl @@ -6,11 +6,11 @@ Singleton definition for identification of the ABC-Model. struct ABCModel <: AbstractPhysicsModel end """ - PerturbativeABC <: AbstractModel + PerturbativeABC <: AbstractModelDefinition The model being used for the ABC model. """ -struct PerturbativeABC <: AbstractModelDefinition end +struct PerturbativeABC <: QEDbase.AbstractModelDefinition end """ ABCParticle @@ -54,13 +54,6 @@ S task with two children. """ struct ComputeTaskABC_S2 <: AbstractComputeTask end -""" - ComputeTaskABC_P <: AbstractComputeTask - -P task with no children. -""" -struct ComputeTaskABC_P <: AbstractComputeTask end - """ ComputeTaskABC_V <: AbstractComputeTask @@ -92,7 +85,6 @@ Constant vector of all tasks of the ABC-Model. ABC_TASKS = [ ComputeTaskABC_S1, ComputeTaskABC_S2, - ComputeTaskABC_P, ComputeTaskABC_V, ComputeTaskABC_U, ComputeTaskABC_Sum, diff --git a/src/interface.jl b/src/interface.jl index 1cf44ce..9a8c18f 100644 --- a/src/interface.jl +++ b/src/interface.jl @@ -5,7 +5,7 @@ import QEDbase.AbstractParticle Base type for a model, e.g. ABC-Model or QED. This is used to dispatch many functions. """ -abstract type AbstractPhysicsModel <: AbstractModel end +abstract type AbstractPhysicsModel end """ ParticleValue{ParticleType <: AbstractParticleStateful} @@ -99,9 +99,9 @@ Return the model of this process description or input. function model end """ - type_from_name(model::AbstractModel, name::String) + type_from_name(model::AbstractPhysicsModel, name::String) -For a name of a particle in the given `AbstractModel`, return the particle's `Type` and index as a tuple. The input string can be expetced to be of the form \"\". +For a name of a particle in the given `AbstractPhysicsModel`, return the particle's `Type` and index as a tuple. The input string can be expetced to be of the form \"\". 
""" function type_index_from_name end diff --git a/src/qed/compute.jl b/src/qed/compute.jl index 1c5d997..feb9861 100644 --- a/src/qed/compute.jl +++ b/src/qed/compute.jl @@ -51,15 +51,20 @@ function ComputableDAGs.compute( ) where {P1<:ParticleStateful,P2<:ParticleStateful,V1<:ValueType,V2<:ValueType} p3 = QED_conserve_momentum(data1.p, data2.p) state = QED_vertex() - if (typeof(data1.v) <: AdjointBiSpinor) - state = data1.v * state + if data1.v isa AdjointBiSpinor + @assert !(data2.v isa AdjointBiSpinor) + state = data1.v * state * data2.v + elseif data1.v isa BiSpinor + @assert !(data2.v isa BiSpinor) + state = data2.v * state * data1.v + elseif data2.v isa AdjointBiSpinor + @assert !(data1.v isa AdjointBiSpinor) + state = data2.v * state * data1.v + elseif data2.v isa BiSpinor + @assert !(data1.v isa BiSpinor) + state = data1.v * state * data2.v else - state = state * data1.v - end - if (typeof(data2.v) <: AdjointBiSpinor) - state = data2.v * state - else - state = state * data2.v + @assert "invalid V task inputs" end dataOut = ParticleValue(p3, state) @@ -88,9 +93,12 @@ function ComputableDAGs.compute( P1<:ParticleStateful{D1,S1,EL}, P2<:ParticleStateful{D2,S2,EL}, } - #@assert isapprox(data1.p.momentum, data2.p.momentum, rtol = sqrt(eps()), atol = sqrt(eps())) "$(data1.p.momentum) vs. $(data2.p.momentum)" + inner1 = QED_inner_edge(data1.p) + inner2 = QED_inner_edge(data2.p) - inner = QED_inner_edge(propagated_particle(data1.p)) + # TODO: This is broken. It's currently not possible (i think) to find out which of the two is the correct + # inner edge value to take. Likely the graph building has to be changed to provide the correct one first or similar. + inner = is_outgoing(data1.p) ? inner1 : inner2 # inner edge is just a "scalar", data1 and data2 are bispinor/adjointbispinnor, need to keep correct order if typeof(data1.v) <: BiSpinor @@ -106,8 +114,12 @@ function ComputableDAGs.compute( data2::ParticleValue{ParticleStateful{D2,Photon},V2}, ) where {D1<:ParticleDirection,D2<:ParticleDirection,V1<:ValueType,V2<:ValueType} # TODO: assert that data1 and data2 are opposites + @assert isapprox( + momentum(data1.p), momentum(data2.p), rtol=sqrt(eps()), atol=sqrt(eps()) + ) "$(momentum(data1.p)) vs. $(momentum(data2.p))" + inner = QED_inner_edge(data1.p) - # inner edge is just a scalar, data1 and data2 are photon states that are just Complex numbers here + return data1.v * inner * data2.v end @@ -119,12 +131,13 @@ Compute inner edge (1 input particle, 1 output particle). function ComputableDAGs.compute( ::ComputeTaskQED_S1, data::ParticleValue{P,V} ) where {P<:ParticleStateful,V<:ValueType} + inner = QED_inner_edge(data.p) new_p = propagated_particle(data.p) # inner edge is just a scalar, can multiply from either side if typeof(data.v) <: BiSpinor - return ParticleValue(new_p, QED_inner_edge(new_p) * data.v) + return ParticleValue(new_p, inner * data.v) else - return ParticleValue(new_p, data.v * QED_inner_edge(new_p)) + return ParticleValue(new_p, data.v * inner) end end @@ -132,24 +145,24 @@ end compute(::ComputeTaskQED_Sum, data...) compute(::ComputeTaskQED_Sum, data::AbstractArray) -Compute a sum over the vector. Use an algorithm that accounts for accumulated errors in long sums with potentially large differences in magnitude of the summands. +Compute a sum over the vector and return the `abs2()` of it. Linearly many FLOP with growing data. """ -function ComputableDAGs.compute(::ComputeTaskQED_Sum, data...)::ComplexF64 +function ComputableDAGs.compute(::ComputeTaskQED_Sum, data...) 
# TODO: want to use sum_kbn here but it doesn't seem to support ComplexF64, do it element-wise? s = 0.0im for d in data s += d end - return s + return abs2(s) end -function ComputableDAGs.compute(::ComputeTaskQED_Sum, data::AbstractArray)::ComplexF64 +function ComputableDAGs.compute(::ComputeTaskQED_Sum, data::AbstractArray) # TODO: want to use sum_kbn here but it doesn't seem to support ComplexF64, do it element-wise? s = 0.0im for d in data s += d end - return s + return abs2(s) end diff --git a/src/qed/create.jl b/src/qed/create.jl index 85bc599..3b5b659 100644 --- a/src/qed/create.jl +++ b/src/qed/create.jl @@ -49,11 +49,11 @@ function gen_process_input(processDescription::ScatteringProcess) end """ - gen_graph(process_description::ScatteringProcess) + graph(process_description::ScatteringProcess) For a given `QEDprocesses.ScatteringProcess`, return the `DAG` that computes it. """ -function gen_graph(process_description::ScatteringProcess) +function ComputableDAGs.graph(process_description::ScatteringProcess) initial_diagram = FeynmanDiagram(process_description) diagrams = gen_diagrams(initial_diagram) @@ -69,7 +69,7 @@ function gen_graph(process_description::ScatteringProcess) insert_edge!(graph, sum_node, global_data_out) # remember the data out nodes for connection - dataOutNodes = Dict() + data_out_nodes = Dict() for particle in initial_diagram.particles # generate data in and U tasks @@ -81,7 +81,7 @@ function gen_graph(process_description::ScatteringProcess) insert_edge!(graph, compute_u, data_out) # remember the data_out node for future edges - dataOutNodes[String(particle)] = data_out + data_out_nodes[String(particle)] = data_out end # TODO: this should be parallelizable somewhat easily @@ -91,8 +91,8 @@ function gen_graph(process_description::ScatteringProcess) # handle the vertices for vertices in diagram.vertices for vertex in vertices - data_in1 = dataOutNodes[String(vertex.in1)] - data_in2 = dataOutNodes[String(vertex.in2)] + data_in1 = data_out_nodes[String(vertex.in1)] + data_in2 = data_out_nodes[String(vertex.in2)] compute_V = insert_node!(graph, ComputeTaskQED_V()) # compute vertex @@ -105,7 +105,7 @@ function gen_graph(process_description::ScatteringProcess) if (vertex.out == tie.in1 || vertex.out == tie.in2) # out particle is part of the tie -> there will be an S2 task with it later, don't make S1 task - dataOutNodes[String(vertex.out)] = data_V_out + data_out_nodes[String(vertex.out)] = data_V_out continue end @@ -119,13 +119,13 @@ function gen_graph(process_description::ScatteringProcess) insert_edge!(graph, compute_S1, data_S1_out) # overrides potentially different nodes from previous diagrams, which is intentional - dataOutNodes[String(vertex.out)] = data_S1_out + data_out_nodes[String(vertex.out)] = data_S1_out end end # handle the tie - data_in1 = dataOutNodes[String(tie.in1)] - data_in2 = dataOutNodes[String(tie.in2)] + data_in1 = data_out_nodes[String(tie.in1)] + data_in2 = data_out_nodes[String(tie.in2)] compute_S2 = insert_node!(graph, ComputeTaskQED_S2()) diff --git a/src/qed/particle.jl b/src/qed/particle.jl index 8552b2c..c44f025 100644 --- a/src/qed/particle.jl +++ b/src/qed/particle.jl @@ -225,7 +225,11 @@ Return the factor of a vertex in a QED feynman diagram. 
end @inline function QED_inner_edge(p::ParticleStateful) - return propagator(particle_species(p), momentum(p)) + if is_outgoing(p) + return propagator(particle_species(p), momentum(p)) + else + return propagator(particle_species(p), -momentum(p)) + end end """ @@ -245,12 +249,18 @@ function QED_conserve_momentum(p1::AbstractParticleStateful, p2::AbstractParticl end p3_mom = p1_mom + p2_mom - if (particle_direction(P3) isa Incoming) - return parameterless(typeof(p1))( - particle_direction(P3), particle_species(P3), -p3_mom - ) + if (is_incoming(particle_direction(P3))) + p3_mom *= -1 end - return parameterless(typeof(p1))(particle_direction(P3), particle_species(P3), p3_mom) + + p3 = parameterless(typeof(p1))(particle_direction(P3), particle_species(P3), p3_mom) + + #=print("$(is_incoming(p1) ? "in" : "out") $(particle_species(p1)) + ") + print("$(is_incoming(p2) ? "in" : "out") $(particle_species(p2)) -> ") + println("$(is_incoming(p3) ? "in" : "out") $(particle_species(p3))") + println("$(momentum(p1)[1]) + $(momentum(p2)[1]) -> $(p3_mom[1])")=# + + return p3 end """ diff --git a/src/qed/properties.jl b/src/qed/properties.jl index 93bfa85..99d2d82 100644 --- a/src/qed/properties.jl +++ b/src/qed/properties.jl @@ -29,13 +29,6 @@ Return the compute effort of a V task. """ compute_effort(t::ComputeTaskQED_V)::Float64 = (1150.0 + 764.0 + 828.0) / 3.0 -""" - compute_effort(t::ComputeTaskQED_P) - -Return the compute effort of a P task. -""" -compute_effort(t::ComputeTaskQED_P)::Float64 = 0.0 - """ compute_effort(t::ComputeTaskQED_Sum) @@ -60,13 +53,6 @@ Return the number of children of a ComputeTaskQED_S2 (always 2). """ children(::ComputeTaskQED_S2) = 2 -""" - children(::ComputeTaskQED_P) - -Return the number of children of a ComputeTaskQED_P (always 1). -""" -children(::ComputeTaskQED_P) = 1 - """ children(::ComputeTaskQED_U) diff --git a/src/qed/types.jl b/src/qed/types.jl index af35150..e42568c 100644 --- a/src/qed/types.jl +++ b/src/qed/types.jl @@ -19,13 +19,6 @@ S task with two children. """ struct ComputeTaskQED_S2 <: AbstractComputeTask end -""" - ComputeTaskQED_P <: AbstractComputeTask - -P task with no children. -""" -struct ComputeTaskQED_P <: AbstractComputeTask end - """ ComputeTaskQED_V <: AbstractComputeTask @@ -57,7 +50,6 @@ Constant vector of all tasks of the QED-Model. 
QED_TASKS = [ ComputeTaskQED_S1, ComputeTaskQED_S2, - ComputeTaskQED_P, ComputeTaskQED_V, ComputeTaskQED_U, ComputeTaskQED_Sum, diff --git a/test/known_graphs.jl b/test/known_graphs.jl index 802fbcb..e84bb42 100644 --- a/test/known_graphs.jl +++ b/test/known_graphs.jl @@ -31,10 +31,10 @@ function test_random_walk(RNG, g::DAG, n::Int64) # choose one of split/reduce option = rand(RNG, 1:2) - if option == 1 && !isempty(opt.nodeReductions) - push_operation!(g, rand(RNG, collect(opt.nodeReductions))) - elseif option == 2 && !isempty(opt.nodeSplits) - push_operation!(g, rand(RNG, collect(opt.nodeSplits))) + if option == 1 && !isempty(opt.node_reductions) + push_operation!(g, rand(RNG, collect(opt.node_reductions))) + elseif option == 2 && !isempty(opt.node_splits) + push_operation!(g, rand(RNG, collect(opt.node_splits))) else i = i - 1 end diff --git a/test/node_reduction.jl b/test/node_reduction.jl index 2c5b626..d668059 100644 --- a/test/node_reduction.jl +++ b/test/node_reduction.jl @@ -58,32 +58,32 @@ insert_edge!(graph, CD, C1C) opt = get_operations(graph) -@test length(opt) == (nodeReductions=1, nodeSplits=1) +@test length(opt) == (node_reductions=1, node_splits=1) -nr = first(opt.nodeReductions) +nr = first(opt.node_reductions) @test Set(nr.input) == Set([B1C_1, B1C_2]) push_operation!(graph, nr) opt = get_operations(graph) -@test length(opt) == (nodeReductions=1, nodeSplits=1) +@test length(opt) == (node_reductions=1, node_splits=1) -nr = first(opt.nodeReductions) +nr = first(opt.node_reductions) @test Set(nr.input) == Set([B1D_1, B1D_2]) push_operation!(graph, nr) opt = get_operations(graph) @test is_valid(graph) -@test length(opt) == (nodeReductions=0, nodeSplits=1) +@test length(opt) == (node_reductions=0, node_splits=1) pop_operation!(graph) opt = get_operations(graph) -@test length(opt) == (nodeReductions=1, nodeSplits=1) +@test length(opt) == (node_reductions=1, node_splits=1) reset_graph!(graph) opt = get_operations(graph) -@test length(opt) == (nodeReductions=1, nodeSplits=1) +@test length(opt) == (node_reductions=1, node_splits=1) @test is_valid(graph) diff --git a/test/unit_tests_estimator.jl b/test/unit_tests_estimator.jl index 48c7bea..78201c6 100644 --- a/test/unit_tests_estimator.jl +++ b/test/unit_tests_estimator.jl @@ -8,8 +8,8 @@ function test_op_specific(estimator, graph, nr::NodeReduction) compute_effort_reduce = compute_effort(nr.input[1].task) * (length(nr.input) - 1) @test isapprox(estimate.data, -data_reduce; atol=eps(Float64)) - @test isapprox(estimate.computeEffort, -compute_effort_reduce) - @test isapprox(estimate.computeIntensity, compute_effort_reduce / data_reduce) + @test isapprox(estimate.compute_effort, -compute_effort_reduce) + @test isapprox(estimate.compute_intensity, compute_effort_reduce / data_reduce) return nothing end @@ -23,8 +23,8 @@ function test_op_specific(estimator, graph, ns::NodeSplit) compute_effort_increase = compute_effort(ns.input.task) * copies @test isapprox(estimate.data, data_increase; atol=eps(Float64)) - @test isapprox(estimate.computeEffort, compute_effort_increase) - @test isapprox(estimate.computeIntensity, compute_effort_increase / data_increase) + @test isapprox(estimate.compute_effort, compute_effort_increase) + @test isapprox(estimate.compute_intensity, compute_effort_increase / data_increase) return nothing end @@ -40,18 +40,19 @@ function test_op(estimator, graph, op) @test isapprox((estimate_before + estimate).data, estimate_after_apply.data) @test isapprox( - (estimate_before + estimate).computeEffort, 
estimate_after_apply.computeEffort + (estimate_before + estimate).compute_effort, estimate_after_apply.compute_effort ) @test isapprox( - (estimate_before + estimate).computeIntensity, estimate_after_apply.computeIntensity + (estimate_before + estimate).compute_intensity, + estimate_after_apply.compute_intensity, ) test_op_specific(estimator, graph, op) return nothing end -@testset "Global Metric Estimator" for (graph_string, exp_data, exp_computeEffort) in - zip(["AB->AB", "AB->ABBB"], [976, 10944], [53, 1075]) +@testset "Global Metric Estimator" for (graph_string, exp_data, exp_compute_effort) in + zip(["AB->AB", "AB->ABBB"], [784, 10656], [53, 1075]) estimator = GlobalMetricEstimator() @test cost_type(estimator) == CDCost @@ -63,14 +64,14 @@ end estimate = graph_cost(estimator, graph) @test estimate.data == exp_data - @test estimate.computeEffort == exp_computeEffort - @test isapprox(estimate.computeIntensity, exp_computeEffort / exp_data) + @test estimate.compute_effort == exp_compute_effort + @test isapprox(estimate.compute_intensity, exp_compute_effort / exp_data) end @testset "Operation Cost" begin ops = get_operations(graph) - nrs = copy(ops.nodeReductions) - nss = copy(ops.nodeSplits) + nrs = copy(ops.node_reductions) + nss = copy(ops.node_splits) for nr in nrs test_op(estimator, graph, nr) diff --git a/test/unit_tests_execution.jl b/test/unit_tests_execution.jl index 7a6d0da..4088502 100644 --- a/test/unit_tests_execution.jl +++ b/test/unit_tests_execution.jl @@ -95,15 +95,6 @@ expected_result = ground_truth_graph_result(particles_2_2) for _ in 1:10 # test in a loop because graph layout should not change the result graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->AB.txt"), process_2_2) - @test isapprox( - execute(graph, process_2_2, machine, particles_2_2, @__MODULE__), - expected_result; - rtol=RTOL, - ) - - # graph should be fully scheduled after being executed - @test is_scheduled(graph) - func = get_compute_function(graph, process_2_2, machine, @__MODULE__) @test isapprox(func(particles_2_2), expected_result; rtol=RTOL) end @@ -116,11 +107,8 @@ end @test is_valid(graph) - @test isapprox( - execute(graph, process_2_2, machine, particles_2_2, @__MODULE__), - expected_result; - rtol=RTOL, - ) + func = get_compute_function(graph, process_2_2, machine, @__MODULE__) + @test isapprox(func(particles_2_2), expected_result; rtol=RTOL) # graph should be fully scheduled after being executed @test is_scheduled(graph) @@ -140,12 +128,6 @@ expected_result = groundtruth_func(particles_2_4) for _ in 1:5 # test in a loop because graph layout should not change the result graph = parse_dag(joinpath(@__DIR__, "..", "input", "AB->ABBB.txt"), process_2_4) - @test isapprox( - execute(graph, process_2_4, machine, particles_2_4, @__MODULE__), - expected_result; - rtol=RTOL, - ) - func = get_compute_function(graph, process_2_4, machine, @__MODULE__) @test isapprox(func(particles_2_4), expected_result; rtol=RTOL) end @@ -159,7 +141,8 @@ TODO: fix precision(?) 
issues
         optimize!(RandomWalkOptimizer(RNG), graph, 100)
 
         @test is_valid(graph)
-        @test isapprox(execute(graph, process_2_4, machine, particles_2_4, @__MODULE__), expected_result; rtol = RTOL)
+        func = get_compute_function(graph, process_2_4, machine, @__MODULE__)
+        @test isapprox(func(particles_2_4), expected_result; rtol=RTOL)
     end
 end
 =#
@@ -167,10 +150,12 @@ end
 @testset "$(process) after random walk" for process in ["ke->ke", "ke->kke", "ke->kkke"]
     process = parse_process("ke->kkke", QEDModel())
     inputs = [gen_process_input(process) for _ in 1:100]
-    graph = gen_graph(process)
-    gt = execute.(Ref(graph), Ref(process), Ref(machine), inputs, Ref(@__MODULE__))
+    graph = ComputableDAGs.graph(process)
+
+    f_gt = get_compute_function(graph, process, machine, @__MODULE__)
+    gt = f_gt.(inputs)
     for i in 1:50
-        graph = gen_graph(process)
+        graph = ComputableDAGs.graph(process)
         optimize!(RandomWalkOptimizer(RNG), graph, 100)
 
         @test is_valid(graph)
diff --git a/test/unit_tests_graph.jl b/test/unit_tests_graph.jl
index 631bc81..825ec1e 100644
--- a/test/unit_tests_graph.jl
+++ b/test/unit_tests_graph.jl
@@ -4,23 +4,23 @@ using ComputableDAGs
 graph = DAG()
 
 @test length(graph.nodes) == 0
-@test length(graph.appliedOperations) == 0
-@test length(graph.operationsToApply) == 0
-@test length(graph.dirtyNodes) == 0
+@test length(graph.applied_operations) == 0
+@test length(graph.operations_to_apply) == 0
+@test length(graph.dirty_nodes) == 0
 @test length(graph.diff) == (addedNodes=0, removedNodes=0, addedEdges=0, removedEdges=0)
-@test length(get_operations(graph)) == (nodeReductions=0, nodeSplits=0)
+@test length(get_operations(graph)) == (node_reductions=0, node_splits=0)
 
 # s to output (exit node)
 d_exit = insert_node!(graph, DataTask(10))
 
 @test length(graph.nodes) == 1
-@test length(graph.dirtyNodes) == 0
+@test length(graph.dirty_nodes) == 0
 
 # final s compute
 s0 = insert_node!(graph, ComputeTaskABC_S2())
 
 @test length(graph.nodes) == 2
-@test length(graph.dirtyNodes) == 0
+@test length(graph.dirty_nodes) == 0
 
 # data from v0 and v1 to s0
 d_v0_s0 = insert_node!(graph, DataTask(5))
@@ -42,42 +42,20 @@ uA = insert_node!(graph, ComputeTaskABC_U())
 uBp = insert_node!(graph, ComputeTaskABC_U())
 uAp = insert_node!(graph, ComputeTaskABC_U())
 
-# data from PB, PA, PBp and PAp to uB, uA, uBp and uAp
-d_PB_uB = insert_node!(graph, DataTask(6))
-d_PA_uA = insert_node!(graph, DataTask(6))
-d_PBp_uBp = insert_node!(graph, DataTask(6))
-d_PAp_uAp = insert_node!(graph, DataTask(6))
+# entry nodes getting data for U computes
+d_uB = insert_node!(graph, DataTask(6))
+d_uA = insert_node!(graph, DataTask(6))
+d_uBp = insert_node!(graph, DataTask(6))
+d_uAp = insert_node!(graph, DataTask(6))
 
-# P computes PB, PA, PBp and PAp
-PB = insert_node!(graph, ComputeTaskABC_P())
-PA = insert_node!(graph, ComputeTaskABC_P())
-PBp = insert_node!(graph, ComputeTaskABC_P())
-PAp = insert_node!(graph, ComputeTaskABC_P())
-
-# entry nodes getting data for P computes
-d_PB = insert_node!(graph, DataTask(4))
-d_PA = insert_node!(graph, DataTask(4))
-d_PBp = insert_node!(graph, DataTask(4))
-d_PAp = insert_node!(graph, DataTask(4))
-
-@test length(graph.nodes) == 26
-@test length(graph.dirtyNodes) == 0
+@test length(graph.nodes) == 18
+@test length(graph.dirty_nodes) == 0
 
 # now for all the edges
-insert_edge!(graph, d_PB, PB)
-insert_edge!(graph, d_PA, PA)
-insert_edge!(graph, d_PBp, PBp)
-insert_edge!(graph, d_PAp, PAp)
-
-insert_edge!(graph, PB, d_PB_uB)
-insert_edge!(graph, PA, d_PA_uA)
-insert_edge!(graph, PBp, d_PBp_uBp)
-insert_edge!(graph, PAp, d_PAp_uAp)
-
-insert_edge!(graph, d_PB_uB, uB) -insert_edge!(graph, d_PA_uA, uA) -insert_edge!(graph, d_PBp_uBp, uBp) -insert_edge!(graph, d_PAp_uAp, uAp) +insert_edge!(graph, d_uB, uB) +insert_edge!(graph, d_uA, uA) +insert_edge!(graph, d_uBp, uBp) +insert_edge!(graph, d_uAp, uAp) insert_edge!(graph, uB, d_uB_v0) insert_edge!(graph, uA, d_uA_v0) @@ -97,10 +75,10 @@ insert_edge!(graph, d_v1_s0, s0) insert_edge!(graph, s0, d_exit) -@test length(graph.nodes) == 26 -@test length(graph.appliedOperations) == 0 -@test length(graph.operationsToApply) == 0 -@test length(graph.dirtyNodes) == 0 +@test length(graph.nodes) == 18 +@test length(graph.applied_operations) == 0 +@test length(graph.operations_to_apply) == 0 +@test length(graph.dirty_nodes) == 0 @test length(graph.diff) == (addedNodes=0, removedNodes=0, addedEdges=0, removedEdges=0) @test is_valid(graph) @@ -128,43 +106,43 @@ insert_edge!(graph, s0, d_exit) @test length(siblings(s0)) == 1 operations = get_operations(graph) -@test length(operations) == (nodeReductions=0, nodeSplits=0) -@test length(graph.dirtyNodes) == 0 +@test length(operations) == (node_reductions=0, node_splits=0) +@test length(graph.dirty_nodes) == 0 @test operations == get_operations(graph) properties = get_properties(graph) -@test properties.computeEffort == 28 +@test properties.compute_effort == 28 @test properties.data == 62 -@test properties.computeIntensity ≈ 28 / 62 -@test properties.noNodes == 26 -@test properties.noEdges == 25 +@test properties.compute_intensity ≈ 28 / 62 +@test properties.number_of_nodes == 26 +@test properties.number_of_edges == 25 operations = get_operations(graph) -@test length(graph.dirtyNodes) == 0 +@test length(graph.dirty_nodes) == 0 -@test length(operations) == (nodeReductions=0, nodeSplits=0) +@test length(operations) == (node_reductions=0, node_splits=0) @test isempty(operations) -@test length(graph.dirtyNodes) == 0 +@test length(graph.dirty_nodes) == 0 @test length(graph.nodes) == 26 -@test length(graph.appliedOperations) == 0 -@test length(graph.operationsToApply) == 0 +@test length(graph.applied_operations) == 0 +@test length(graph.operations_to_apply) == 0 reset_graph!(graph) -@test length(graph.dirtyNodes) == 0 +@test length(graph.dirty_nodes) == 0 @test length(graph.nodes) == 26 -@test length(graph.appliedOperations) == 0 -@test length(graph.operationsToApply) == 0 +@test length(graph.applied_operations) == 0 +@test length(graph.operations_to_apply) == 0 properties = get_properties(graph) -@test properties.noNodes == 26 -@test properties.noEdges == 25 -@test properties.computeEffort == 28 +@test properties.number_of_nodes == 26 +@test properties.number_of_edges == 25 +@test properties.compute_effort == 28 @test properties.data == 62 -@test properties.computeIntensity ≈ 28 / 62 +@test properties.compute_intensity ≈ 28 / 62 operations = get_operations(graph) -@test length(operations) == (nodeReductions=0, nodeSplits=0) +@test length(operations) == (node_reductions=0, node_splits=0) @test is_valid(graph) diff --git a/test/unit_tests_nodes.jl b/test/unit_tests_nodes.jl index 53e75c3..d16467b 100644 --- a/test/unit_tests_nodes.jl +++ b/test/unit_tests_nodes.jl @@ -3,7 +3,7 @@ using ComputableDAGs nC1 = make_node(QEDFeynman.ComputeTaskABC_U()) nC2 = make_node(QEDFeynman.ComputeTaskABC_V()) -nC3 = make_node(QEDFeynman.ComputeTaskABC_P()) +nC3 = make_node(QEDFeynman.ComputeTaskABC_S1()) nC4 = make_node(QEDFeynman.ComputeTaskABC_Sum()) nD1 = make_node(DataTask(10)) diff --git a/test/unit_tests_properties.jl b/test/unit_tests_properties.jl index 
627b5f0..096ca99 100644
--- a/test/unit_tests_properties.jl
+++ b/test/unit_tests_properties.jl
@@ -4,35 +4,43 @@ using ComputableDAGs
 prop = GraphProperties()
 
 @test prop.data == 0.0
-@test prop.computeEffort == 0.0
-@test prop.computeIntensity == 0.0
-@test prop.noNodes == 0.0
-@test prop.noEdges == 0.0
+@test prop.compute_effort == 0.0
+@test prop.compute_intensity == 0.0
+@test prop.number_of_nodes == 0.0
+@test prop.number_of_edges == 0.0
 
 prop2 = (
-    data=5.0, computeEffort=6.0, computeIntensity=6.0 / 5.0, noNodes=2, noEdges=3
+    data=5.0,
+    compute_effort=6.0,
+    compute_intensity=6.0 / 5.0,
+    number_of_nodes=2,
+    number_of_edges=3,
 )::GraphProperties
 
 @test prop + prop2 == prop2
 @test prop2 - prop == prop2
 
-negProp = -prop2
-@test negProp.data == -5.0
-@test negProp.computeEffort == -6.0
-@test negProp.computeIntensity == 6.0 / 5.0
-@test negProp.noNodes == -2
-@test negProp.noEdges == -3
+neg_prop = -prop2
+@test neg_prop.data == -5.0
+@test neg_prop.compute_effort == -6.0
+@test neg_prop.compute_intensity == 6.0 / 5.0
+@test neg_prop.number_of_nodes == -2
+@test neg_prop.number_of_edges == -3
 
-@test negProp + prop2 == GraphProperties()
+@test neg_prop + prop2 == GraphProperties()
 
 prop3 = (
-    data=7.0, computeEffort=3.0, computeIntensity=7.0 / 3.0, noNodes=-3, noEdges=2
+    data=7.0,
+    compute_effort=3.0,
+    compute_intensity=7.0 / 3.0,
+    number_of_nodes=-3,
+    number_of_edges=2,
 )::GraphProperties
 
-propSum = prop2 + prop3
+prop_sum = prop2 + prop3
 
-@test propSum.data == 12.0
-@test propSum.computeEffort == 9.0
-@test propSum.computeIntensity == 9.0 / 12.0
-@test propSum.noNodes == -1
-@test propSum.noEdges == 5
+@test prop_sum.data == 12.0
+@test prop_sum.compute_effort == 9.0
+@test prop_sum.compute_intensity == 9.0 / 12.0
+@test prop_sum.number_of_nodes == -1
+@test prop_sum.number_of_edges == 5
diff --git a/test/unit_tests_qedmodel.jl b/test/unit_tests_qedmodel.jl
index 5b5ed96..0187bfd 100644
--- a/test/unit_tests_qedmodel.jl
+++ b/test/unit_tests_qedmodel.jl
@@ -260,7 +260,7 @@ end
     compton_function = get_compute_function(graph, process, machine, @__MODULE__)
     @test isapprox(compton_function.(input), compton_groundtruth.(input))
 
-    graph_generated = gen_graph(process)
+    graph_generated = ComputableDAGs.graph(process)
 
     compton_function = get_compute_function(graph_generated, process, machine, @__MODULE__)
     @test isapprox(compton_function.(input), compton_groundtruth.(input))
@@ -274,7 +274,7 @@ end
     model = QEDModel()
     process = parse_process(proc_str, model)
     machine = cpu_st()
-    graph = gen_graph(process)
+    graph = ComputableDAGs.graph(process)
 
     compute_function = get_compute_function(graph, process, machine, @__MODULE__)
 
diff --git a/test/unit_tests_tasks.jl b/test/unit_tests_tasks.jl
index 6a8f9ce..c485f58 100644
--- a/test/unit_tests_tasks.jl
+++ b/test/unit_tests_tasks.jl
@@ -5,7 +5,6 @@ S1 = QEDFeynman.ComputeTaskABC_S1()
 S2 = QEDFeynman.ComputeTaskABC_S2()
 U = QEDFeynman.ComputeTaskABC_U()
 V = QEDFeynman.ComputeTaskABC_V()
-P = QEDFeynman.ComputeTaskABC_P()
 Sum = QEDFeynman.ComputeTaskABC_Sum()
 
 Data10 = DataTask(10)
@@ -15,7 +14,6 @@ Data20 = DataTask(20)
 @test compute_effort(S2) == 12
 @test compute_effort(U) == 1
 @test compute_effort(V) == 6
-@test compute_effort(P) == 0
 @test compute_effort(Sum) == 1
 @test compute_effort(Data10) == 0
 @test compute_effort(Data20) == 0
@@ -24,7 +22,6 @@ Data20 = DataTask(20)
 @test data(S2) == 0
 @test data(U) == 0
 @test data(V) == 0
-@test data(P) == 0
 @test data(Sum) == 0
 @test data(Data10) == 10
 @test data(Data20) == 20
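
Usage note: a minimal sketch of how the reworked entry points fit together, written against the calls exercised by the tests in this diff. `ComputableDAGs.graph` replaces the removed `gen_graph`; which package exports `parse_process`, `gen_process_input`, `cpu_st`, and `get_compute_function` is assumed from their usage above, and any extra setup done in the test harness (RNG seeding, RuntimeGeneratedFunctions initialization, etc.) is assumed to already be in place.

    using QEDFeynman
    using ComputableDAGs

    # build a process description and the DAG that computes it
    process = parse_process("ke->kke", QEDModel())
    dag = ComputableDAGs.graph(process)        # formerly gen_graph(process)

    # compile the DAG into a callable function for a single-threaded CPU machine
    machine = cpu_st()
    func = get_compute_function(dag, process, machine, @__MODULE__)

    # evaluate one random phase-space point; the result is abs2 of the summed
    # amplitude, per the ComputeTaskQED_Sum change in this diff
    input = gen_process_input(process)
    result = func(input)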