diff --git a/common/exclusive_state_groups.impl.h b/common/exclusive_state_groups.impl.h
index 53e4e83c..4a5b1228 100644
--- a/common/exclusive_state_groups.impl.h
+++ b/common/exclusive_state_groups.impl.h
@@ -74,10 +74,11 @@ void ExclusiveStateGroup::explain_requires(con
         log_info("Placing cell %s at bel %s does not violate %s.%s\n", cell.c_str(ctx), ctx->nameOfBel(bel),
                  object.c_str(ctx), definition.prefix.c_str(ctx));
     } else {
+        log_info("%d\n", state);
         log_info("Placing cell %s at bel %s does violates %s.%s, because current state is %s, constraint requires one "
                  "of:\n",
-                 cell.c_str(ctx), ctx->nameOfBel(bel), object.c_str(ctx), definition.prefix.c_str(ctx),
-                 definition.states.at(state).c_str(ctx));
+                 cell.c_str(ctx), ctx->nameOfBel(bel), object.c_str(ctx), definition.prefix.c_str(ctx), "-1");
+        // definition.states.at(state).c_str(ctx));
 
         for (const auto required_state : state_range) {
             log_info(" - %s\n", definition.states.at(required_state).c_str(ctx));
diff --git a/fpga_interchange/arch.h b/fpga_interchange/arch.h
index 7873a8ec..fab6c700 100644
--- a/fpga_interchange/arch.h
+++ b/fpga_interchange/arch.h
@@ -720,6 +720,7 @@ struct Arch : ArchAPI<ArchRanges>
     // Clusters
     void pack_cluster();
    void prepare_cluster(const ClusterPOD *cluster, uint32_t index);
+    void prepare_macro_cluster(const ClusterPOD *cluster, uint32_t index);
     dict<ClusterId, Cluster> clusters;
 
     // User constraints
diff --git a/fpga_interchange/arch_pack_clusters.cc b/fpga_interchange/arch_pack_clusters.cc
index f4e50233..3514d7c7 100644
--- a/fpga_interchange/arch_pack_clusters.cc
+++ b/fpga_interchange/arch_pack_clusters.cc
@@ -40,7 +40,8 @@ enum ClusterWireNodeState
 enum ExpansionDirection
 {
     CLUSTER_UPHILL_DIR = 0,
-    CLUSTER_DOWNHILL_DIR = 1
+    CLUSTER_DOWNHILL_DIR = 1,
+    CLUSTER_BOTH_DIR = 2
 };
 
 struct ClusterWireNode
@@ -370,6 +371,330 @@ static bool check_cluster_cells_compatibility(CellInfo *old_cell, CellInfo *new_
     return true;
 }
 
+bool reduce(uint32_t x, uint32_t y, const ClusterPOD *cluster, dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> &domain,
+            Context *ctx)
+{
+    bool change = false;
+    std::vector<CellInfo *> remove_cell;
+    uint32_t counter = 0;
+    for (const auto &connection : cluster->connection_graph[x].connections) {
+        if (connection.target_idx == y)
+            break;
+        counter++;
+    }
+    for (const auto &x_cell : domain[x]) {
+        if (ctx->verbose)
+            log_info("Testing cell %s\n", x_cell->name.c_str(ctx));
+        bool found = false;
+        for (const auto &y_cell : domain[y]) {
+            if (ctx->verbose)
+                log_info(" - Y candidate: %s\n", y_cell->name.c_str(ctx));
+            for (const auto edge : cluster->connection_graph[x].connections[counter].edges) {
+                if (!x_cell->ports.count(IdString(edge.cell_pin)) ||
+                    !y_cell->ports.count(IdString(edge.other_cell_pin)))
+                    break;
+                const auto x_net = x_cell->ports[IdString(edge.cell_pin)].net;
+                const auto y_net = y_cell->ports[IdString(edge.other_cell_pin)].net;
+
+                if (x_net != y_net)
+                    break;
+                bool x_driver = x_net->driver.cell == x_cell;
+                bool y_driver = y_net->driver.cell == y_cell;
+                if ((edge.dir != 0 || !y_driver) && (edge.dir != 1 || !x_driver) &&
+                    (edge.dir != 2 || y_driver || x_driver))
+                    break;
+                found = true;
+            }
+            if (found) {
+                log_info(" - Works for %s\n", y_cell->name.c_str(ctx));
+                break;
+            }
+        }
+        if (!found)
+            remove_cell.push_back(x_cell);
+    }
+
+    for (const auto &cell : remove_cell) {
+        domain[x].erase(cell);
+        change = true;
+    }
+
+    return change;
+}
+
+void binary_constraint_check(const ClusterPOD *cluster, std::queue<std::pair<uint32_t, uint32_t>> &workqueue,
+                             dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> &idx_to_cells, Context *ctx)
+{
+    while (!workqueue.empty()) {
+        std::pair<uint32_t, uint32_t> arc = workqueue.front();
+        workqueue.pop();
+        uint32_t x, y;
+        x = arc.first;
+        y = arc.second;
+        if (ctx->verbose)
+            log_info("Checking pair %d:%d\n", x, y);
+        if (reduce(x, y, cluster, idx_to_cells, ctx)) {
+            for (const auto &connection : cluster->connection_graph[arc.first].connections)
+                if (connection.target_idx != y)
+                    workqueue.push(std::pair<uint32_t, uint32_t>(arc.first, connection.target_idx));
+        }
+    }
+}
+
+bool back_solver(const ClusterPOD *cluster, dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> &idx_to_cells, Context *ctx)
+{
+    dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+    for (const auto &arc : idx_to_cells)
+        for (const auto &cell : arc.second)
+            possible_idx[cell].insert(arc.first);
+    std::queue<uint32_t> prep;
+    for (const auto &arc : idx_to_cells) {
+        if (arc.second.size() == 0)
+            return false;
+        if (arc.second.size() > 1) {
+            for (const auto &cell : arc.second) {
+                auto copy_idx_to_cells(idx_to_cells);
+                copy_idx_to_cells[arc.first].clear();
+                for (uint32_t idx : possible_idx[cell]) {
+                    copy_idx_to_cells[idx].erase(cell);
+                    prep.push(idx);
+                }
+                copy_idx_to_cells[arc.first].insert(cell);
+                std::queue<std::pair<uint32_t, uint32_t>> workqueue;
+                while (!prep.empty()) {
+                    uint32_t idx = prep.front();
+                    prep.pop();
+                    for (const auto &connection : cluster->connection_graph[idx].connections)
+                        if (arc.first != connection.target_idx)
+                            workqueue.push(std::pair<uint32_t, uint32_t>(arc.first, connection.target_idx));
+                }
+                binary_constraint_check(cluster, workqueue, copy_idx_to_cells, ctx);
+                if (back_solver(cluster, copy_idx_to_cells, ctx)) {
+                    idx_to_cells = std::move(copy_idx_to_cells);
+                    return true;
+                }
+            }
+        }
+    }
+    return true;
+}
+
+void Arch::prepare_macro_cluster(const ClusterPOD *cluster, uint32_t index)
+{
+    Context *ctx = getCtx();
+    IdString cluster_name(cluster->name);
+
+    pool<IdString> cluster_cell_types;
+    for (auto cell_type : cluster->root_cell_types)
+        cluster_cell_types.insert(IdString(cell_type));
+
+    // Find cluster roots, at most one per macro
+    dict<IdString, CellInfo *> roots;
+    for (auto &cell : cells) {
+        CellInfo *ci = cell.second.get();
+        if (ci->macro_parent == IdString())
+            continue;
+        if (ci->cluster != ClusterId())
+            continue;
+        if (!cluster_cell_types.count(ci->type))
+            continue;
+        if (roots.count(ci->macro_parent))
+            continue;
+        // Simple check based on cell type counting
+        dict<IdString, uint32_t> cells_in_macro, counter;
+        pool<IdString> cell_types;
+        for (auto &cell_type : cluster->required_cells) {
+            cells_in_macro[IdString(cell_type.name)] = cell_type.count;
+            cell_types.insert(IdString(cell_type.name));
+        }
+
+        for (auto &node_cell : macro_to_cells[ci->macro_parent]) {
+            auto cell_type = node_cell->type;
+            if (!counter.count(cell_type))
+                counter[cell_type] = 0;
+            counter[cell_type]++;
+            cell_types.insert(cell_type);
+        }
+        bool failed = false;
+        for (auto cell_type : cell_types) {
+            if (ctx->verbose && cells_in_macro.count(cell_type))
+                log_info("Required: %s %d\n", cell_type.c_str(ctx), cells_in_macro[cell_type]);
+            if (ctx->verbose && cells_in_macro.count(cell_type))
+                log_info("Have: %s %d\n", cell_type.c_str(ctx), counter[cell_type]);
+            if (!cells_in_macro.count(cell_type) || !counter.count(cell_type) ||
+                cells_in_macro[cell_type] != counter[cell_type])
+                failed = true;
+            if (failed && ctx->verbose)
+                log_info("Cell count stage failed, for sure not this cluster\n");
+            if (failed)
+                break;
+        }
+        if (failed) {
+            roots[ci->macro_parent] = nullptr;
+            continue;
+        }
+
+        // Arc consistency
+        dict<uint32_t, pool<CellInfo *, hash_ptr_ops>> idx_to_cells;
+        // First apply the singular constraints: cell type and used cell ports
+        for (auto &cell : macro_to_cells[ci->macro_parent])
+            for (auto &node : cluster->connection_graph)
+                if (IdString(node.cell_type) == cell->type)
+                    if ((node.idx != 0 && cell->name != ci->name) ||
+                        (node.idx == 0 && cell->name == ci->name)) {
+                        idx_to_cells[node.idx].insert(cell);
+                    }
+
+        for (auto &arc : idx_to_cells) {
+            std::vector<CellInfo *> remove_cell;
+            pool<IdString> used_ports;
+            for (const auto &port : cluster->connection_graph[arc.first].used_ports)
+                used_ports.insert(IdString(port.name));
+            for (const auto &cell : arc.second) {
+                uint32_t count = 0;
+                for (const auto &port : cell->ports) {
+                    if (!used_ports.count(port.first)) {
+                        remove_cell.push_back(cell);
+                        break;
+                    }
+                    count++;
+                }
+                if (count != used_ports.size()) {
+                    remove_cell.push_back(cell);
+                    break;
+                }
+            }
+            for (const auto &cell : remove_cell) {
+                arc.second.erase(cell);
+            }
+        }
+        if (ctx->verbose) {
+            log_info("After mono constraints are applied\n");
+            dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+            for (const auto &arc : idx_to_cells)
+                for (const auto &cell : arc.second)
+                    possible_idx[cell].insert(arc.first);
+
+            for (const auto arc : possible_idx) {
+                log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+                for (const auto idx : arc.second)
+                    log_info(" - %d\n", idx);
+            }
+        }
+        // Solve for binary constraints
+        std::queue<std::pair<uint32_t, uint32_t>> workqueue;
+        for (const auto &arc : idx_to_cells)
+            for (const auto &connection : cluster->connection_graph[arc.first].connections)
+                workqueue.push(std::pair<uint32_t, uint32_t>(arc.first, connection.target_idx));
+
+        binary_constraint_check(cluster, workqueue, idx_to_cells, ctx);
+        for (const auto &arc : idx_to_cells) {
+            if (arc.second.size() == 0) {
+                if (ctx->verbose)
+                    log_info("AC-3 failed\n");
+                failed = true;
+                break;
+            }
+        }
+        if (failed)
+            continue;
+
+        if (ctx->verbose) {
+            log_info("After AC-3\n");
+            dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+            for (const auto &arc : idx_to_cells)
+                for (const auto &cell : arc.second)
+                    possible_idx[cell].insert(arc.first);
+
+            for (const auto arc : possible_idx) {
+                log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+                for (const auto idx : arc.second)
+                    log_info(" - %d\n", idx);
+            }
+        }
+
+        bool change = false;
+        std::queue<std::pair<uint32_t, CellInfo *>> removequeue;
+        // Keep assigning cells to indices that map to only a single cell.
+        // Remove that cell from the other mappings and recheck the binary constraints.
+        // Fail if some index has no candidate cell or some cell has no candidate index.
+        do {
+            change = false;
+            dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+            pool<uint32_t> changed_idxs;
+            for (const auto &arc : idx_to_cells) {
+                if (arc.second.size() == 0) {
+                    failed = true;
+                    break;
+                }
+                for (const auto &cell : arc.second)
+                    possible_idx[cell].insert(arc.first);
+            }
+            if (failed)
+                break;
+            for (auto &cell : macro_to_cells[ci->macro_parent])
+                if (possible_idx[cell].size() == 0) {
+                    failed = true;
+                    break;
+                }
+            if (failed)
+                break;
+            for (const auto &arc : idx_to_cells) {
+                if (arc.second.size() == 1)
+                    for (const auto &idx : possible_idx[*arc.second.begin()])
+                        if (idx != arc.first)
+                            removequeue.push(std::pair<uint32_t, CellInfo *>(idx, *arc.second.begin()));
+            }
+            while (!removequeue.empty()) {
+                auto t = removequeue.front();
+                removequeue.pop();
+                uint32_t idx = t.first;
+                CellInfo *cell = t.second;
+                idx_to_cells[idx].erase(cell);
+                change = true;
+                changed_idxs.insert(idx);
+            }
+            for (const uint32_t &idx : changed_idxs)
+                for (const auto &connection : cluster->connection_graph[idx].connections)
+                    workqueue.push(std::pair<uint32_t, uint32_t>(idx, connection.target_idx));
+
+            binary_constraint_check(cluster, workqueue, idx_to_cells, ctx);
+        } while (change);
+        if (failed) {
+            if (ctx->verbose)
+                log_info("Single cell mapping failed\n");
+            continue;
+        }
+        if (ctx->verbose) {
+            log_info("After mapping indices with single cell\n");
+            dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+            for (const auto &arc : idx_to_cells)
+                for (const auto &cell : arc.second)
+                    possible_idx[cell].insert(arc.first);
+
+            for (const auto arc : possible_idx) {
+                log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+                for (const auto idx : arc.second)
+                    log_info(" - %d\n", idx);
+            }
+        }
+        // At this point all indices that could only be mapped to a single cell are mapped.
+        // The next step is to run a solver with backtracking to resolve the remaining idx<->cell mappings.
+        if (ctx->verbose)
+            log_info("Back solver\n");
+        if (!back_solver(cluster, idx_to_cells, ctx)) {
+            if (ctx->verbose)
+                log_info("Back solver failed\n");
+            continue;
+        }
+        if (ctx->verbose) {
+            log_info("Final mapping after back solver\n");
+            dict<CellInfo *, pool<uint32_t>, hash_ptr_ops> possible_idx;
+            for (const auto &arc : idx_to_cells)
+                for (const auto &cell : arc.second)
+                    possible_idx[cell].insert(arc.first);
+
+            for (const auto arc : possible_idx) {
+                log_info("Possible idx %s:\n", arc.first->name.c_str(ctx));
+                for (const auto idx : arc.second)
+                    log_info(" - %d\n", idx);
+            }
+        }
+    }
+}
+
 void Arch::prepare_cluster(const ClusterPOD *cluster, uint32_t index)
 {
     Context *ctx = getCtx();
@@ -383,6 +708,8 @@ void Arch::prepare_cluster(const ClusterPOD *cluster, uint32_t index)
     std::vector<CellInfo *> roots;
     for (auto &cell : cells) {
         CellInfo *ci = cell.second.get();
+        if (ci->macro_parent != IdString())
+            continue;
         if (ci->cluster != ClusterId())
             continue;
 
@@ -564,9 +891,17 @@ void Arch::pack_cluster()
         dump_clusters(chip_info, ctx);
 
     for (uint32_t i = 0; i < chip_info->clusters.size(); ++i) {
-        const auto &cluster = chip_info->clusters[i];
+        if (!chip_info->clusters[i].from_macro) {
+            const auto &cluster = chip_info->clusters[i];
 
-        prepare_cluster(&cluster, i);
+            prepare_cluster(&cluster, i);
+        } else {
+            const auto &cluster = chip_info->clusters[i];
+            if (ctx->verbose)
+                log_info("%s\n", IdString(cluster.name).c_str(ctx));
+
+            prepare_macro_cluster(&cluster, i);
+        }
     }
 }
diff --git a/fpga_interchange/chipdb.h b/fpga_interchange/chipdb.h
index 85dc7f25..1086976a 100644
--- a/fpga_interchange/chipdb.h
+++ b/fpga_interchange/chipdb.h
@@ -34,7 +34,12 @@ NEXTPNR_NAMESPACE_BEGIN
  * kExpectedChipInfoVersion
  */
 
-static constexpr int32_t kExpectedChipInfoVersion = 14;
+static constexpr int32_t kExpectedChipInfoVersion = 15;
+
+NPNR_PACKED_STRUCT(struct BelConnectedPinsPOD {
+    uint32_t pin1;
+    uint32_t pin2;
+});
 
 // Flattened site indexing.
 //
@@ -80,6 +85,8 @@ NPNR_PACKED_STRUCT(struct BelInfoPOD {
     int8_t inverting_pin;
     int16_t padding;
+
+    RelSlice<BelConnectedPinsPOD> connected_pins;
 });
 
 enum BELCategory
@@ -416,13 +423,45 @@ NPNR_PACKED_STRUCT(struct ChainablePortPOD {
     int16_t avg_y_offset;
 });
 
+NPNR_PACKED_STRUCT(struct ClusterRequiredCellPOD {
+    uint32_t name;
+    uint32_t count;
+});
+
+NPNR_PACKED_STRUCT(struct ClusterUsedPortPOD {
+    uint32_t name;
+});
+
+NPNR_PACKED_STRUCT(struct ClusterEdgePOD {
+    uint32_t dir;
+    uint32_t cell_pin;
+    uint32_t other_cell_pin;
+    uint32_t other_cell_type;
+});
+
+NPNR_PACKED_STRUCT(struct ClusterConnectionsPOD {
+    uint32_t target_idx;
+    RelSlice<ClusterEdgePOD> edges;
+});
+
+NPNR_PACKED_STRUCT(struct ClusterConnectionGraphPOD {
+    uint32_t idx;
+    uint32_t cell_type;
+    RelSlice<ClusterConnectionsPOD> connections;
+    RelSlice<ClusterUsedPortPOD> used_ports;
+});
+
 NPNR_PACKED_STRUCT(struct ClusterPOD {
     uint32_t name;
     RelSlice<uint32_t> root_cell_types;
     RelSlice<ChainablePortPOD> chainable_ports;
     RelSlice<ClusterCellPortPOD> cluster_cells_map;
+    RelSlice<ClusterRequiredCellPOD> required_cells;
+    RelSlice<ClusterConnectionGraphPOD> connection_graph;
     uint32_t out_of_site_clusters;
     uint32_t disallow_other_cells;
+    uint32_t from_macro;
 });
 
 NPNR_PACKED_STRUCT(struct ChipInfoPOD {
diff --git a/fpga_interchange/macros.cc b/fpga_interchange/macros.cc
index 762615c1..4011f683 100644
--- a/fpga_interchange/macros.cc
+++ b/fpga_interchange/macros.cc
@@ -50,6 +50,7 @@ static IdString derived_name(Context *ctx, IdString base_name, IdString suffix)
 
 void Arch::expand_macros()
 {
+    log_info("Expand macros\n");
     // Make up a list of cells, so we don't have modify-while-iterating issues
     Context *ctx = getCtx();
     std::vector<CellInfo *> cells;
@@ -78,6 +79,7 @@ void Arch::expand_macros()
 
         // Get the ultimate root of this macro expansion
         IdString parent = (cell->macro_parent == IdString()) ? cell->name : cell->macro_parent;
+        log_info("%s %s\n", cell->name.c_str(ctx), parent.c_str(ctx));
 
         // Create child instances
         for (const auto &inst : macro->cell_insts) {
             CellInfo *inst_cell =
@@ -86,6 +88,7 @@ void Arch::expand_macros()
                 inst_cell->params[IdString(param.key)] = IdString(param.value).str(ctx);
             }
             inst_cell->macro_parent = parent;
+            log_info("  %s %s\n", inst_cell->name.c_str(ctx), inst_cell->type.c_str(ctx));
             next_cells.push_back(inst_cell);
         }
         // Create and connect nets