import jpype
import jpype.imports
import numpy as np
import pandas as pd
from jpype.types import *

# Start the JVM with the Bayes Server jar on the classpath before importing any
# Java packages.
classpath = "lib/bayesserver-10.8.jar"
jpype.startJVM(classpath=[classpath])

from com.bayesserver import *
from com.bayesserver.inference import *
from com.bayesserver.data import *
from jpype import java

from data_frame_utils import to_data_table
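
# This example loads the Waste network and performs batch queries: evidence for
# each case is read from a small pandas DataFrame and, per case, discrete,
# continuous and joint query distributions are evaluated along with the case
# log-likelihood.
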
def create_data_reader_command() -> DataReaderCommand:
    """
    Manually create a pandas DataFrame here to keep the example self-contained.
    We could also use a pandas DataFrame loaded from a file, or
    even create a DatabaseDataReaderCommand that connects directly to a database.
    """
    df = pd.DataFrame({
        "Waste type": ["Industrial", "Household", np.nan, "Industrial", "Industrial"],
        "CO2 concentration": [-1.7, -2.0, -1.7, np.nan, 0.5]})

    return DataTableDataReaderCommand(to_data_table(df))
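
# The DataFrame above could equally be loaded from a file inside
# create_data_reader_command. A minimal sketch, assuming a CSV with the same
# column names at a hypothetical path (not shipped with this example):
#
#   df = pd.read_csv("data/waste.csv")
#   return DataTableDataReaderCommand(to_data_table(df))
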
def create_evidence_reader_command(network: Network) -> EvidenceReaderCommand:
    """
    Create an evidence reader command, which maps data columns to network variables.
    This can connect to a database, a pandas DataFrame, or you can create a custom reader.
    """
    data_reader_command = create_data_reader_command()

    waste_type = network.getVariables().get("Waste type", True)
    co2_concentration = network.getVariables().get("CO2 concentration", True)

    # Bind each data column to a network variable: the discrete "Waste type" column is
    # mapped by state name (ColumnValueType.NAME) and the continuous "CO2 concentration"
    # column by value (ColumnValueType.VALUE).
    return DefaultEvidenceReaderCommand(
        data_reader_command,
        java.util.Arrays.asList([
            VariableReference(waste_type, ColumnValueType.NAME, waste_type.getName()),
            VariableReference(co2_concentration, ColumnValueType.VALUE, co2_concentration.getName())
        ]),
        ReaderOptions()
    )
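
# Load the existing Waste network from disk and look up the variables and states
# used in the queries below.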
network = Network()
network.load("networks/Waste.bayes")
variables = network.getVariables()
burning_regimen = variables.get("Burning Regimen", True)
waste_type = variables.get("Waste type", True)
filter_state = variables.get("Filter state", True)
burning_regimen_stable = burning_regimen.getStates().get("Stable", True)
filter_efficiency = variables.get("Filter efficiency", True)
dust_emission = variables.get("Dust emission", True)
metals_in_waste = variables.get("Metals in waste", True)
co2_concentration = variables.get("CO2 concentration", True)
light_penetrability = variables.get("Light penetrability", True)
metals_emission = variables.get("Metals emission", True)
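
# Create an exact inference engine (Relevance Tree algorithm) for the network,
# with query options and query output objects created from the same factory.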
factory = RelevanceTreeInferenceFactory()
inference = factory.createInferenceEngine(network)
query_options = factory.createQueryOptions()
query_output = factory.createQueryOutput()
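
# Register the distributions to be calculated for each case: the discrete
# distribution over Burning Regimen, a Gaussian over Light penetrability, and a
# joint Gaussian over Filter efficiency and Burning Regimen.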
query_burning_regimen = Table(burning_regimen)
inference.getQueryDistributions().add(query_burning_regimen)
query_light_penetrability = CLGaussian(light_penetrability)
inference.getQueryDistributions().add(query_light_penetrability)
query_joint = CLGaussian([filter_efficiency, burning_regimen])
inference.getQueryDistributions().add(query_joint)
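
# Also request the log-likelihood of each case.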
query_options.setLogLikelihood(True)
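
# Create the evidence reader that streams the cases defined in the DataFrame above.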
evidence_reader_command = create_evidence_reader_command(network)
read_options = DefaultReadOptions()
evidence_reader = evidence_reader_command.executeReader()
evidence = inference.getEvidence()
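
# Read each case in turn: each successful read() sets the evidence for that case,
# and the queries are then evaluated with that evidence applied.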
while evidence_reader.read(evidence, read_options):
    inference.query(query_options, query_output)

    log_likelihood = query_output.getLogLikelihood()
    prob_stable = query_burning_regimen.get(burning_regimen_stable)
    mean_light_penetrability = query_light_penetrability.getMean(light_penetrability)
    variance_light_penetrability = query_light_penetrability.getVariance(light_penetrability)
    joint_element = query_joint.getMean(filter_efficiency, burning_regimen_stable)

    print(f"{log_likelihood}\t{prob_stable}\t{mean_light_penetrability}\t{variance_light_penetrability}\t{joint_element}")

evidence_reader.close()
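
# Instead of streaming cases from data, evidence could also be set manually for a
# single case. A rough, unverified sketch, assuming the Evidence methods clear(),
# setState() and set() behave as in other Bayes Server samples:
#
#   evidence.clear()
#   evidence.setState(burning_regimen_stable)              # discrete evidence (assumed API)
#   evidence.set(co2_concentration, jpype.JDouble(-1.7))   # continuous evidence (assumed API)
#   inference.query(query_options, query_output)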