
Feature/variable hourly dist table

Closed emanuel buzey requested to merge feature/variable_hourly_dist_table into dev
3 files changed: +72 −27
- Implemented CUPS validation using the stdnum library to ensure that each CUPS adheres to the correct format and standards.
- Added validation that the coefficients of each row sum to 1, with a tolerance margin to absorb floating-point arithmetic imprecision; this keeps imported distribution table data accurate. A standalone sketch of both checks follows this list.
- Introduced a new boolean field on the DistributionTable model to record errors during batch processing of import jobs. Because the import runs asynchronously, the flag lets the operation halt as soon as any batch reports an error.
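
For reference, a minimal standalone sketch of both checks described above, assuming python-stdnum is installed; the example inputs are hypothetical and not taken from this MR:

    import math

    from stdnum.es import cups
    from stdnum.exceptions import ValidationError as StdnumValidationError

    def check_cups(code):
        """Return (True, "") for a valid CUPS, else (False, reason)."""
        try:
            cups.validate(code)
            return True, ""
        except StdnumValidationError as error:
            return False, str(error)

    def coefficients_sum_to_one(coefficients):
        """The tolerance absorbs floating-point rounding in the sum."""
        return math.isclose(sum(coefficients), 1.0, rel_tol=1e-9)

    print(check_cups("ES123"))                         # (False, ...): too short
    print(coefficients_sum_to_one([0.5, 0.25, 0.25]))  # True
    print(coefficients_sum_to_one([0.5, 0.25, 0.2]))   # False: sums to 0.95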
@@ -9,6 +9,10 @@ import werkzeug
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
from odoo.http import request

from stdnum.es import cups
from stdnum.exceptions import InvalidChecksum, InvalidComponent, InvalidFormat, InvalidLength

import math

logger = logging.getLogger(__name__)
@@ -36,7 +40,7 @@ class DistributionTableImportWizard(models.TransientModel):
        string="File Encoding",
        help="Encoding format of the imported CSV file.",
    )

    @api.constrains("import_file")
    def _constrains_import_file(self):
        if self.fname:
@@ -45,19 +49,48 @@ class DistributionTableImportWizard(models.TransientModel):
            raise ValidationError(_("Only csv format files are accepted."))

    def import_file_button(self):
        # Depending on the distribution table type, import synchronously or
        # enqueue background jobs.
        file_data = base64.b64decode(self.import_file)
        parsing_data = self.with_context(active_id=self.ids[0])._parse_file(file_data)
        active_id = self.env.context.get("active_id")
        distribution_table = self.env[
            "energy_selfconsumption.distribution_table"
        ].browse(active_id)
        distribution_table.import_error_found = False
        if distribution_table.type == "variable_schedule":
            # Variable schedules are imported in the background via queued jobs.
            distribution_table.message_post(
                subject=_("Importation in Progress"),
                body=_(
                    "The distribution table import is being processed in the"
                    " background. Any errors will be reported here."
                ),
            )
            header, data_rows = parsing_data[0], parsing_data[1:]
            # Split the rows into groups to keep memory usage down.
            cups_groups = self._divide_in_groups(data_rows, 20)
            for group in cups_groups:
                if distribution_table.import_error_found:
                    break
                # Prepend the header so every job gets a self-contained chunk.
                group_with_header = [header] + group
                self.with_delay().process_variable_schedule(group_with_header, active_id)
        else:
            # Every other table type is imported synchronously.
            self.import_all_lines(parsing_data, distribution_table)
        return True

    def _divide_in_groups(self, data, group_size):
        """Divide the data into groups to process them separately."""
        return [data[i : i + group_size] for i in range(0, len(data), group_size)]
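
For illustration, the chunking helper slices the rows like this (a standalone sketch, no Odoo dependencies assumed):

    def divide_in_groups(data, group_size):
        # Same slicing as _divide_in_groups above.
        return [data[i : i + group_size] for i in range(0, len(data), group_size)]

    rows = ["row %d" % n for n in range(45)]
    print([len(group) for group in divide_in_groups(rows, 20)])  # [20, 20, 5]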
 
    def download_template_button(self):
        # Pick the CSV template that matches the distribution table type.
        table_type = self.env.context.get("type")
        if table_type == "fixed":
            template = (
                "energy_selfconsumption.distribution_table_fixed_example_attachment"
            )
        elif table_type == "variable_schedule":
            template = "energy_selfconsumption.distribution_table_variable_schedule_example_attachment"
        else:
            # Guard so `template` can never be referenced unbound.
            raise ValidationError(_("Unknown distribution table type."))

        distribution_table_example_attachment = self.env.ref(template)
        download_url = "/web/content/{}/?download=true".format(
            str(distribution_table_example_attachment.id)
        )
@@ -97,9 +130,110 @@ class DistributionTableImportWizard(models.TransientModel):
            supply_point_assignation_values_list.append((0, 0, value))
        distribution_table.write(
            {"supply_point_assignation_ids": supply_point_assignation_values_list}
        )

    def process_variable_schedule(self, data, distribution_table_id):
        DistributionTableVariable = self.env[
            "energy_selfconsumption.distribution_table_variable"
        ]
        DistributionTableVariableCoefficient = self.env[
            "energy_selfconsumption.distribution_table_var_coeff"
        ]
        cups_ids = data[0][1:]  # The header row lists the CUPS codes.
        logger.debug("CUPS IDs found: %s", cups_ids)
        # Validate every CUPS before processing any coefficients. The loop
        # variable is named cups_code to avoid shadowing the stdnum cups module.
        for cups_code in cups_ids:
            is_valid, error_message = self.validate_cups_id(cups_code, distribution_table_id)
            if not is_valid:
                logger.error(error_message)
                return
        hours_data = data[1:]  # The remaining rows hold an hour and its coefficients.
        coefficients_batch = []
        batch_size = 50

        # Preload existing records to avoid one search per CUPS.
        existing_variables = DistributionTableVariable.search([
            ("cups_id", "in", cups_ids),
            ("distribution_table_id", "=", distribution_table_id),
        ])
        existing_cups_map = {var.cups_id: var for var in existing_variables}
        logger.debug(
            "Preloaded %d existing records from DistributionTableVariable",
            len(existing_variables),
        )

        for row in hours_data:
            hour = int(row[0])
            coefficients = list(map(float, row[1:]))
            # Validate that the coefficients of the row sum to 1.
            is_valid, error_message = self.validate_coefficients(
                coefficients, hour, distribution_table_id
            )
            if not is_valid:
                logger.error(error_message)
                return

            logger.debug("Processing data for hour: %d", hour)
            for cups_index, coefficient in enumerate(row[1:], start=1):
                cups_id = cups_ids[cups_index - 1]
                variable_record = existing_cups_map.get(cups_id)
                if not variable_record:
                    variable_record = DistributionTableVariable.create({
                        "distribution_table_id": distribution_table_id,
                        "cups_id": cups_id,
                    })
                    existing_cups_map[cups_id] = variable_record
                    logger.debug("Created new DistributionTableVariable record for CUPS ID: %s", cups_id)

                coefficients_batch.append({
                    "distribution_table_variable_id": variable_record.id,
                    "hour": hour,
                    "coefficient": float(coefficient),
                })

                if len(coefficients_batch) >= batch_size:
                    DistributionTableVariableCoefficient.create(coefficients_batch)
                    logger.debug("Batch created %d coefficients for hour %d", len(coefficients_batch), hour)
                    coefficients_batch.clear()

            if hour == 8760:
                # 8760 is the last hour of a standard year, so report completion.
                distribution_table = self.env[
                    "energy_selfconsumption.distribution_table"
                ].browse(distribution_table_id)
                distribution_table.message_post(
                    subject=_("Import Completed"),
                    body=_("The import process has been completed successfully."),
                )
        if coefficients_batch:
            DistributionTableVariableCoefficient.create(coefficients_batch)
            logger.debug("Processed last batch of %d coefficients", len(coefficients_batch))

        logger.debug(
            "Completing the import process for the Distribution Table with ID: %s",
            distribution_table_id,
        )
 
    def validate_cups_id(self, cups_id, distribution_table_id):
        try:
            cups.validate(cups_id)
            return True, ""
        except InvalidLength:
            error_message = _("Invalid CUPS %s: The length is incorrect.") % cups_id
        except InvalidComponent:
            error_message = _("Invalid CUPS %s: The CUPS does not start with 'ES'.") % cups_id
        except InvalidFormat:
            error_message = _("Invalid CUPS %s: The CUPS has an incorrect format.") % cups_id
        except InvalidChecksum:
            error_message = _("Invalid CUPS %s: The checksum of the CUPS is incorrect.") % cups_id

        distribution_table = self.env[
            "energy_selfconsumption.distribution_table"
        ].browse(distribution_table_id)
        distribution_table.message_post(
            subject=_("CUPS Validation Error"),
            body=error_message,
        )
        # Flag the error so the enqueue loop stops scheduling further batches.
        distribution_table.import_error_found = True
        return False, error_message

    def validate_coefficients(self, coefficients, hour, distribution_table_id):
        total_sum = sum(coefficients)
        # Tolerance margin to absorb floating-point arithmetic imprecision.
        if not math.isclose(total_sum, 1.0, rel_tol=1e-9):
            error_message = _("The sum of coefficients for hour %s does not equal 1") % hour
            distribution_table = self.env[
                "energy_selfconsumption.distribution_table"
            ].browse(distribution_table_id)
            distribution_table.message_post(
                subject=_("Coefficient Validation Error"),
                body=error_message,
            )
            distribution_table.import_error_found = True
            return False, error_message
        return True, ""
    def get_supply_point_assignation_values(self, line):
        return {