
Commit

various observability levels and hidden-node variations added
jbrouill committed Jul 4, 2022
1 parent 8c56f0f commit c463f25
Showing 6 changed files with 55 additions and 57 deletions.
6 changes: 3 additions & 3 deletions conf/identification.py
@@ -2,9 +2,9 @@

# Regularization parameters
lambda_lasso = 4e-10
- #1e8 for weighted CIGRE, 2e-6 for unweighted CIGRE or ieee33, 2e-7 for unweighted ieee123
- lambda_eiv = 2e-7
- lambdaprime = 200 #20
+ #1e8 for laplacian weighted CIGRE (or 4e5 otherwise), 2e-6 for unweighted CIGRE or ieee33, 2e-7 for unweighted ieee123
+ lambda_eiv = 1e-6
+ lambdaprime = 200 #200 for ieee123, 20 for others

# Tolerance for stopping the iterative algorithm
abs_tol = 1e-13
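
The updated comment encodes a per-network choice of the EIV regularization strength, and lambdaprime now also depends on the network. A small illustrative lookup that merely restates those comments (the dictionary and helper are assumptions for readability, not code from the repository):

# Illustrative restatement of the comments above, not repository code
LAMBDA_EIV = {
    ("CIGRE", "laplacian weighted"): 1e8,
    ("CIGRE", "weighted"): 4e5,
    ("CIGRE", "unweighted"): 2e-6,
    ("ieee33", "unweighted"): 2e-6,
    ("ieee123", "unweighted"): 2e-7,
}
LAMBDAPRIME = {"ieee123": 200}

def pick_regularization(network, weighting):
    """Return (lambda_eiv, lambdaprime) for a given network and weighting scheme."""
    return LAMBDA_EIV[(network, weighting)], LAMBDAPRIME.get(network, 20)

print(pick_regularization("ieee123", "unweighted"))  # (2e-07, 200)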
23 changes: 11 additions & 12 deletions conf/simulation.py
@@ -2,21 +2,20 @@
from conf.conf import noise_level

# Definition of observed nodes for ieee123bus network
- list1 = [1, 15, 17, 19, 24, 39, 43, 53] #15%
- list2 = [3, 18, 32, 37, 28, 47, 52] #30%
- list3 = [4, 10, 16, 40, 51] #40%
- list4 = [8, 9, 22, 36, 49, 46] #50%
- list5 = [6, 12, 55, 26, 44, 50] #60%
- list6 = [5, 14, 15, 20, 25, 29, 31, 34, 38, 41, 45, 54] #80%
-
- #[12, 6, 36, 9, 46, 49]
- observed_nodes = list1 + list2 + [4, 10, 16, 40]# + list3 #+ list4 + list5 #+ list6
- hidden_nodes = list(set(range(1,56)) - set(np.array(observed_nodes)))
- #hidden_nodes = [54]
+ list1 = [3, 16, 18, 28, 40, 47, 52] #
+ list2 = [8, 9, 22, 36, 49, 46, 51] #
+ list3 = [6, 12, 55, 26, 44, 50] #36% (20 nodes)
+ list4 = [1, 4, 10, 15, 17, 37] #47% (26 nodes)
+ list5 = [19, 24, 32, 39, 43, 53] #57% (32 nodes)
+ list6 = [5, 14, 15, 20, 25, 29, 31, 34, 38, 41, 45, 54] #79%
+
+ observed_nodes = list1 + list2 + list3 + list4 + list5 #+ list6
+ hidden_nodes = list(set(range(1,56)) - set(np.array(observed_nodes)-1))
+ #hidden_nodes = []

# Assumption of constant Z loads on hidden nodes
constant_load_hidden_nodes = True
- load_constantness = 0.95
+ load_constantness = 0.84

# Week during which the sample size starts
selected_weeks = np.array([0]) # 0 is week 12 for the small dataset. Replace by 12 if using the full year data.
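
The lists above partition the 55 ieee123 buses into observed and hidden nodes, and the hidden ones are then assumed to draw a mostly constant (constant-Z) load governed by load_constantness. A minimal sketch of how these settings fit together; the blend_constant_load helper and its blending rule are illustrative assumptions, not code from this repository:

import numpy as np

# Same derivation as above (new version); note the -1 shift introduced by this commit
list1 = [3, 16, 18, 28, 40, 47, 52]
list2 = [8, 9, 22, 36, 49, 46, 51]
list3 = [6, 12, 55, 26, 44, 50]
list4 = [1, 4, 10, 15, 17, 37]
list5 = [19, 24, 32, 39, 43, 53]
observed_nodes = list1 + list2 + list3 + list4 + list5
hidden_nodes = list(set(range(1, 56)) - set(np.array(observed_nodes) - 1))

def blend_constant_load(profile, constantness=0.84):
    # Hypothetical helper: mix a time-varying profile with its own mean so that
    # constantness = 1 yields a perfectly constant (constant-impedance-like) load.
    return constantness * profile.mean(axis=0) + (1.0 - constantness) * profile

profile = 2.0 + np.sin(np.linspace(0, 2 * np.pi, 96))   # one day of load samples
flattened = blend_constant_load(profile)
print(len(hidden_nodes), round(flattened.std() / profile.std(), 2))  # std shrinks to 0.16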
9 changes: 5 additions & 4 deletions identify_network.py
@@ -19,10 +19,11 @@
@click.option('--laplacian', "-l", is_flag=True, help='Is the matrix laplacian')
@click.option('--weighted', "-w", is_flag=True, help='Use covariance matrices as weights')
@click.option('--uncertainty', "-u", is_flag=True, help='Analyse error covariance')
+ @click.option('--unsynchronized', "-r", is_flag=True, help='Simulate smart meter data without phase synchronization')
@click.option('--verbose', "-v", is_flag=True, help='Activates verbosity')

- def identify(network, max_iterations, standard, bayesian_eiv, continue_id,
- phases, sequence, exact, laplacian, weighted, uncertainty, verbose):
+ def identify(network, max_iterations, standard, bayesian_eiv, continue_id, phases,
+ sequence, exact, laplacian, weighted, uncertainty, unsynchronized, verbose):
if verbose:
def pprint(a):
print(a)
@@ -95,7 +96,7 @@ def pprint(a):
current_magnitude_sd + 1j*current_phase_sd,
pmu_ratings, y_init, y_exact, laplacian,
weighted, max_iterations if redo_STLS else 0,
- verbose)
+ not unsynchronized, verbose)

if continue_id:
sparse_tls_cov_iterations = sparse_tls_cov_old_iterations.extend(sparse_tls_cov_iterations)
@@ -106,7 +107,7 @@ def pprint(a):
pprint("Done!")

# Error covariance analysis
- if uncertainty:
+ if uncertainty and not three_phases and not unsynchronized:
fim_wtls, cov_wtls, expected_max_rrms = run_fcns.eiv_fim(name, voltage, current,
voltage_magnitude_sd + 1j * voltage_phase_sd,
current_magnitude_sd + 1j * current_phase_sd,
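
The new --unsynchronized flag is simply inverted into use_pmu_data before being passed to bayesian_eiv, and the error-covariance analysis is now skipped for three-phase or unsynchronized runs. A condensed, self-contained restatement of that gate (the helper name is an illustrative assumption; the condition mirrors the diff):

def should_run_uncertainty(uncertainty, three_phases, unsynchronized):
    # Mirrors the new condition above: the Fisher-information based error
    # covariance analysis only applies to single-phase, synchronized data.
    return uncertainty and not three_phases and not unsynchronized

assert should_run_uncertainty(True, False, False)
assert not should_run_uncertainty(True, False, True)   # --unsynchronized disables it
assert not should_run_uncertainty(True, True, False)   # three-phase runs are excluded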
65 changes: 31 additions & 34 deletions src/identification/run.py
@@ -6,7 +6,7 @@
transformation_matrix, unvectorize_matrix, elimination_sym_matrix, elimination_lap_matrix
from src.models.regression import ComplexRegression, BayesianRegression
from src.models.error_in_variable import TotalLeastSquares, BayesianEIVRegression
- from src.models.noise_transformation import average_true_noise_covariance
+ from src.models.noise_transformation import average_true_noise_covariance, naive_noise_covariance
from src.models.smooth_prior import SmoothPrior, SparseSmoothPrior
from conf.conf import DATA_DIR, seed
from conf import identification
@@ -170,7 +170,7 @@ def add_known_lines(nodes, prior, lambdaprime, y_bus, laplacian=False, prior_typ
tls_bus_weights = np.zeros_like(y_bus)
tls_bus_centers = np.zeros_like(y_bus)

- if prior_type == "optimal":
+ if "optimal" in prior_type:
# Node 2
tls_bus_weights[0, :], tls_bus_weights[:, 0] = (1+1j), (1+1j)
tls_bus_weights[0, 0] = 0
@@ -196,7 +196,7 @@ def add_known_lines(nodes, prior, lambdaprime, y_bus, laplacian=False, prior_typ
#tls_bus_weights[37, 38], tls_bus_weights[38, 37] = (1+1j), (1+1j)
#tls_bus_centers[37, 38], tls_bus_centers[38, 37] = y_bus[37, 38], y_bus[38, 37]

- elif prior_type == "suboptimal":
+ elif "suboptimal" in prior_type:
# Node 10
tls_bus_weights[7, :], tls_bus_weights[:, 7] = (1+1j), (1+1j)
tls_bus_weights[7, 7] = 0
@@ -220,33 +220,14 @@ def add_known_lines(nodes, prior, lambdaprime, y_bus, laplacian=False, prior_typ
#tls_bus_centers[3, 36], tls_bus_centers[36, 3] = y_bus[3, 36], y_bus[36, 3]
#tls_bus_weights[37, 38], tls_bus_weights[38, 37] = (1+1j), (1+1j)
#tls_bus_centers[37, 38], tls_bus_centers[38, 37] = y_bus[37, 38], y_bus[38, 37]
elif prior_type == "wrong":
# Node 2
tls_bus_weights[0, :], tls_bus_weights[:, 0] = (1+1j), (1+1j)
tls_bus_weights[0, 0] = 0
tls_bus_centers[0, :], tls_bus_centers[:, 0] = 0, 0
tls_bus_centers[1, 0], tls_bus_centers[0, 1] = y_bus[1, 0], y_bus[0, 1]
#tls_bus_centers[1, 2], tls_bus_centers[2, 1] = y_bus[1, 2], y_bus[2, 1]

# Node 50 & 51
tls_bus_weights[26, :], tls_bus_weights[:, 26] = (1+1j), (1+1j)
tls_bus_weights[25, :], tls_bus_weights[:, 25] = (1+1j), (1+1j)
tls_bus_weights[26, 26] = 0
tls_bus_weights[25, 25] = 0
tls_bus_centers[26, :], tls_bus_centers[:, 26] = 0, 0
tls_bus_centers[25, :], tls_bus_centers[:, 25] = 0, 0
tls_bus_centers[24, 25], tls_bus_centers[25, 24] = y_bus[24, 25], y_bus[25, 24]
tls_bus_centers[26, 25], tls_bus_centers[25, 26] = y_bus[26, 25], y_bus[25, 26]
tls_bus_centers[26, 27], tls_bus_centers[27, 26] = y_bus[26, 27], y_bus[27, 26]
tls_bus_centers[26, 28], tls_bus_centers[28, 26] = y_bus[26, 28], y_bus[28, 26]

# Vectorize and add to the rest
tls_bus_weights = np.where(np.abs(make_real_vector(E @ vectorize_matrix(tls_bus_weights))) > 0)[0]
tls_bus_centers = make_real_vector(E @ vectorize_matrix(tls_bus_centers))

prior.add_exact_adaptive_prior(indices=tls_bus_weights,
- values=1.1 * tls_bus_centers[tls_bus_weights],
- weights=0.01 * lambdaprime,
+ values=tls_bus_centers[tls_bus_weights] * (1.1 if "wrong" in prior_type else 1.0),
+ weights=lambdaprime * (0.01 if "wrong" in prior_type else 1.0),
orders=SmoothPrior.LAPLACE)

"""
@@ -364,8 +345,8 @@ def pprint(a):
return y_ols, y_tls, y_lasso


- def bayesian_eiv(name, voltage, current, phases_ids, voltage_sd_polar, current_sd_polar, pmu_ratings,
- y_init, y_exact=None, laplacian=False, weighted=False, max_iterations=50, verbose=True):
+ def bayesian_eiv(name, voltage, current, phases_ids, voltage_sd_polar, current_sd_polar, pmu_ratings, y_init,
+ y_exact=None, laplacian=False, weighted=False, max_iterations=50, use_pmu_data=True, verbose=True):
# L1 Regularized weighted TLS
"""
# Computing the Maximum A Posteriori Estimator,
@@ -387,6 +368,7 @@ def bayesian_eiv(name, voltage, current, phases_ids, voltage_sd_polar, current_s
:param laplacian: is the admittance matrix Laplacian?
:param weighted: Use covariances or just identity (classical TLS)?
:param max_iterations: maximum number of lasso iterations
+ :param use_pmu_data: if False, remove the phase synchronization (simulate unsynchronized smart meter data)
:param verbose: verbose ON/OFF
"""

@@ -402,7 +384,7 @@ def pprint(a):
identification.contrast_each_row, identification.regularize_diag)

if y_exact is not None:
- prior = add_known_lines(nodes, prior, identification.lambdaprime, y_exact, laplacian=False, prior_type="suboptimal")
+ prior = add_known_lines(nodes, prior, identification.lambdaprime, y_exact, laplacian=False, prior_type="optimal")
#prior = build_loads_id_prior(nodes, y_exact) # Not working!

if laplacian:
@@ -418,24 +400,39 @@ def pprint(a):
if max_iterations > 0 and voltage is not None and current is not None:
inv_sigma_voltage = None
inv_sigma_current = None
- if weighted:
+ centered_voltage = voltage.copy()
+ centered_current = current.copy()
+
+ if weighted and use_pmu_data:
pprint("Calculating covariance matrices...")
inv_sigma_voltage = average_true_noise_covariance(voltage, np.real(voltage_sd_polar),
np.imag(voltage_sd_polar), True)
inv_sigma_current = average_true_noise_covariance(current, np.real(current_sd_polar) * pmu_ratings,
np.imag(current_sd_polar), True)
pprint("Done!")
+ elif weighted and not use_pmu_data:
+ centered_voltage = np.abs(voltage.copy())
+ centered_current = ((current * voltage.conj()) / np.abs(voltage)).copy()
+
+ pprint("Calculating covariance matrices...")
+ inv_sigma_voltage = naive_noise_covariance(centered_voltage, np.real(voltage_sd_polar),
+ np.imag(voltage_sd_polar) * 100, False)
+ inv_sigma_current = naive_noise_covariance(current, np.real(current_sd_polar) * pmu_ratings,
+ np.imag(current_sd_polar), True)
+ # invert only diagonal elements (off diagonal is zero)
+ inv_sigma_voltage.data = 1.0 / inv_sigma_voltage.data
+ pprint("Done!")

- noisy_voltage = voltage.copy()
- noisy_current = current.copy()
if laplacian:
- noisy_voltage = noisy_voltage - np.mean(noisy_voltage)
+ centered_voltage = centered_voltage - np.mean(centered_voltage)
else:
- noisy_voltage = noisy_voltage - np.tile(np.mean(noisy_voltage, axis=0), (noisy_voltage.shape[0], 1))
- noisy_current = noisy_current - np.tile(np.mean(noisy_current, axis=0), (noisy_current.shape[0], 1))
+ centered_voltage = centered_voltage - np.tile(np.mean(centered_voltage, axis=0),
+ (centered_voltage.shape[0], 1))
+ centered_current = centered_current - np.tile(np.mean(centered_current, axis=0),
+ (centered_current.shape[0], 1))

pprint("STLS identification...")
- sparse_tls_cov.fit(noisy_voltage, noisy_current, inv_sigma_voltage, inv_sigma_current, y_init=y_init)
+ sparse_tls_cov.fit(centered_voltage, centered_current, inv_sigma_voltage, inv_sigma_current, y_init=y_init)
pprint("Done!")

pprint("Extracting results...")
7 changes: 4 additions & 3 deletions src/identification/run3ph.py
@@ -211,8 +211,8 @@ def pprint(a):
return y_ols, y_tls, y_lasso


- def bayesian_eiv(name, voltage, current, phases_ids, voltage_sd_polar, current_sd_polar, pmu_ratings,
- y_init, y_exact=None, laplacian=False, weighted=False, max_iterations=50, verbose=True):
+ def bayesian_eiv(name, voltage, current, phases_ids, voltage_sd_polar, current_sd_polar, pmu_ratings, y_init,
+ y_exact=None, laplacian=False, weighted=False, max_iterations=50, use_pmu_data=True, verbose=True):
# L1 Regularized weighted TLS
"""
# Computing the Maximum Likelihood Estimator,
@@ -234,6 +234,7 @@ def bayesian_eiv(name, voltage, current, phases_ids, voltage_sd_polar, current_s
:param laplacian: is the admittance matrix Laplacian?
:param weighted: Use covariances or just identity (classical TLS)?
:param max_iterations: maximum number of lasso iterations
+ :param use_pmu_data: if False, remove the phase synchronization (simulate unsynchronized smart meter data)
:param verbose: verbose ON/OFF
"""
if verbose:
@@ -254,7 +255,7 @@ def pprint(a):
voltage_sd_polar, current_sd_polar, pmu_ratings[phases_ids == i],
y_init[phases_ids == i, :][:, phases_ids == i], None if y_exact is None else
y_exact[phases_ids == i, :][:, phases_ids == i], laplacian, weighted, max_iterations,
- verbose)
+ use_pmu_data, verbose)
y_fin[np.outer(phases_ids == i, phases_ids == i)] = y_sparse_tls_cov.flatten()

sparse_tls_cov_iterations = []
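
Each phase is identified separately and written back into the full admittance matrix through a boolean outer-product mask, as in the loop above. A toy illustration of that assembly (sizes and values are arbitrary):

import numpy as np

phases_ids = np.array([0, 1, 0, 1])                  # two nodes on each of two phases
y_fin = np.zeros((4, 4), dtype=complex)
for i in (0, 1):
    y_phase = np.full((2, 2), i + 1, dtype=complex)  # stand-in for y_sparse_tls_cov
    y_fin[np.outer(phases_ids == i, phases_ids == i)] = y_phase.flatten()
print(y_fin.real)                                    # block pattern per phase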
2 changes: 1 addition & 1 deletion src/simulation/run.py
@@ -1,6 +1,6 @@
import numpy as np

- from conf import conf
+ from conf import conf, simulation
from src.simulation.simulation import SimulatedNet
from src.simulation.load_profile import load_profile_from_numpy, generate_gaussian_load
from src.simulation.noise import filter_and_resample_measurement, add_polar_noise_to_measurement
